1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2010 Exar Corp.
4  *
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik		: For pointing out the improper error condition
15  *			  check in the s2io_xmit routine and also some
16  *			  issues in the Tx watch dog function. Also for
17  *			  patiently answering all those innumerable
18  *			  questions regaring the 2.6 porting issues.
19  * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
20  *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all the code parts that were
 *			  deprecated and also for styling-related comments.
 * Grant Grundler	: For helping me get rid of some
 *			  architecture-dependent code.
25  * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
26  *
 * The loadable module parameters that are supported by the driver, with a
 * brief explanation of each variable:
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *		values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
 * lro_max_pkts: This parameter defines the maximum number of packets that can
 *     be aggregated into a single large packet.
 * napi: This parameter is used to enable/disable NAPI (polling Rx).
44  *     Possible values '1' for enable and '0' for disable. Default is '1'
 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload (UFO).
46  *      Possible values '1' for enable and '0' for disable. Default is '0'
 * vlan_tag_strip: This can be used to enable or disable VLAN tag stripping.
48  *                 Possible values '1' for enable , '0' for disable.
49  *                 Default is '2' - which means disable in promisc mode
50  *                 and enable in non-promiscuous mode.
 * multiq: This parameter is used to enable/disable MULTIQUEUE support.
52  *      Possible values '1' for enable and '0' for disable. Default is '0'
53  ************************************************************************/
54 
55 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
56 
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/mdio.h>
67 #include <linux/skbuff.h>
68 #include <linux/init.h>
69 #include <linux/delay.h>
70 #include <linux/stddef.h>
71 #include <linux/ioctl.h>
72 #include <linux/timex.h>
73 #include <linux/ethtool.h>
74 #include <linux/workqueue.h>
75 #include <linux/if_vlan.h>
76 #include <linux/ip.h>
77 #include <linux/tcp.h>
78 #include <linux/uaccess.h>
79 #include <linux/io.h>
80 #include <linux/slab.h>
81 #include <linux/prefetch.h>
82 #include <net/tcp.h>
83 #include <net/checksum.h>
84 
85 #include <asm/div64.h>
86 #include <asm/irq.h>
87 
88 /* local include */
89 #include "s2io.h"
90 #include "s2io-regs.h"
91 
92 #define DRV_VERSION "2.0.26.28"
93 
94 /* S2io Driver name & version. */
95 static const char s2io_driver_name[] = "Neterion";
96 static const char s2io_driver_version[] = DRV_VERSION;
97 
98 static const int rxd_size[2] = {32, 48};
99 static const int rxd_count[2] = {127, 85};
100 
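/*
 * Returns 1 when the host owns the RxD (RXD_OWN_XENA is clear) and the
 * marker field in Control_2 no longer holds the RxD block marker,
 * i.e. the descriptor appears to be up to date ("UP2DT").
 */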
101 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
102 {
103 	int ret;
104 
105 	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
106 	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
107 
108 	return ret;
109 }
110 
111 /*
112  * Cards with following subsystem_id have a link state indication
113  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
114  * macro below identifies these cards given the subsystem_id.
115  */
116 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
117 	(dev_type == XFRAME_I_DEVICE) ?					\
118 	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
119 	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
120 
121 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
122 				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
123 
124 static inline int is_s2io_card_up(const struct s2io_nic *sp)
125 {
126 	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
127 }
128 
129 /* Ethtool related variables and Macros. */
130 static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
131 	"Register test\t(offline)",
132 	"Eeprom test\t(offline)",
133 	"Link test\t(online)",
134 	"RLDRAM test\t(offline)",
135 	"BIST Test\t(offline)"
136 };
137 
138 static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
139 	{"tmac_frms"},
140 	{"tmac_data_octets"},
141 	{"tmac_drop_frms"},
142 	{"tmac_mcst_frms"},
143 	{"tmac_bcst_frms"},
144 	{"tmac_pause_ctrl_frms"},
145 	{"tmac_ttl_octets"},
146 	{"tmac_ucst_frms"},
147 	{"tmac_nucst_frms"},
148 	{"tmac_any_err_frms"},
149 	{"tmac_ttl_less_fb_octets"},
150 	{"tmac_vld_ip_octets"},
151 	{"tmac_vld_ip"},
152 	{"tmac_drop_ip"},
153 	{"tmac_icmp"},
154 	{"tmac_rst_tcp"},
155 	{"tmac_tcp"},
156 	{"tmac_udp"},
157 	{"rmac_vld_frms"},
158 	{"rmac_data_octets"},
159 	{"rmac_fcs_err_frms"},
160 	{"rmac_drop_frms"},
161 	{"rmac_vld_mcst_frms"},
162 	{"rmac_vld_bcst_frms"},
163 	{"rmac_in_rng_len_err_frms"},
164 	{"rmac_out_rng_len_err_frms"},
165 	{"rmac_long_frms"},
166 	{"rmac_pause_ctrl_frms"},
167 	{"rmac_unsup_ctrl_frms"},
168 	{"rmac_ttl_octets"},
169 	{"rmac_accepted_ucst_frms"},
170 	{"rmac_accepted_nucst_frms"},
171 	{"rmac_discarded_frms"},
172 	{"rmac_drop_events"},
173 	{"rmac_ttl_less_fb_octets"},
174 	{"rmac_ttl_frms"},
175 	{"rmac_usized_frms"},
176 	{"rmac_osized_frms"},
177 	{"rmac_frag_frms"},
178 	{"rmac_jabber_frms"},
179 	{"rmac_ttl_64_frms"},
180 	{"rmac_ttl_65_127_frms"},
181 	{"rmac_ttl_128_255_frms"},
182 	{"rmac_ttl_256_511_frms"},
183 	{"rmac_ttl_512_1023_frms"},
184 	{"rmac_ttl_1024_1518_frms"},
185 	{"rmac_ip"},
186 	{"rmac_ip_octets"},
187 	{"rmac_hdr_err_ip"},
188 	{"rmac_drop_ip"},
189 	{"rmac_icmp"},
190 	{"rmac_tcp"},
191 	{"rmac_udp"},
192 	{"rmac_err_drp_udp"},
193 	{"rmac_xgmii_err_sym"},
194 	{"rmac_frms_q0"},
195 	{"rmac_frms_q1"},
196 	{"rmac_frms_q2"},
197 	{"rmac_frms_q3"},
198 	{"rmac_frms_q4"},
199 	{"rmac_frms_q5"},
200 	{"rmac_frms_q6"},
201 	{"rmac_frms_q7"},
202 	{"rmac_full_q0"},
203 	{"rmac_full_q1"},
204 	{"rmac_full_q2"},
205 	{"rmac_full_q3"},
206 	{"rmac_full_q4"},
207 	{"rmac_full_q5"},
208 	{"rmac_full_q6"},
209 	{"rmac_full_q7"},
210 	{"rmac_pause_cnt"},
211 	{"rmac_xgmii_data_err_cnt"},
212 	{"rmac_xgmii_ctrl_err_cnt"},
213 	{"rmac_accepted_ip"},
214 	{"rmac_err_tcp"},
215 	{"rd_req_cnt"},
216 	{"new_rd_req_cnt"},
217 	{"new_rd_req_rtry_cnt"},
218 	{"rd_rtry_cnt"},
219 	{"wr_rtry_rd_ack_cnt"},
220 	{"wr_req_cnt"},
221 	{"new_wr_req_cnt"},
222 	{"new_wr_req_rtry_cnt"},
223 	{"wr_rtry_cnt"},
224 	{"wr_disc_cnt"},
225 	{"rd_rtry_wr_ack_cnt"},
226 	{"txp_wr_cnt"},
227 	{"txd_rd_cnt"},
228 	{"txd_wr_cnt"},
229 	{"rxd_rd_cnt"},
230 	{"rxd_wr_cnt"},
231 	{"txf_rd_cnt"},
232 	{"rxf_wr_cnt"}
233 };
234 
235 static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
236 	{"rmac_ttl_1519_4095_frms"},
237 	{"rmac_ttl_4096_8191_frms"},
238 	{"rmac_ttl_8192_max_frms"},
239 	{"rmac_ttl_gt_max_frms"},
240 	{"rmac_osized_alt_frms"},
241 	{"rmac_jabber_alt_frms"},
242 	{"rmac_gt_max_alt_frms"},
243 	{"rmac_vlan_frms"},
244 	{"rmac_len_discard"},
245 	{"rmac_fcs_discard"},
246 	{"rmac_pf_discard"},
247 	{"rmac_da_discard"},
248 	{"rmac_red_discard"},
249 	{"rmac_rts_discard"},
250 	{"rmac_ingm_full_discard"},
251 	{"link_fault_cnt"}
252 };
253 
254 static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
255 	{"\n DRIVER STATISTICS"},
256 	{"single_bit_ecc_errs"},
257 	{"double_bit_ecc_errs"},
258 	{"parity_err_cnt"},
259 	{"serious_err_cnt"},
260 	{"soft_reset_cnt"},
261 	{"fifo_full_cnt"},
262 	{"ring_0_full_cnt"},
263 	{"ring_1_full_cnt"},
264 	{"ring_2_full_cnt"},
265 	{"ring_3_full_cnt"},
266 	{"ring_4_full_cnt"},
267 	{"ring_5_full_cnt"},
268 	{"ring_6_full_cnt"},
269 	{"ring_7_full_cnt"},
270 	{"alarm_transceiver_temp_high"},
271 	{"alarm_transceiver_temp_low"},
272 	{"alarm_laser_bias_current_high"},
273 	{"alarm_laser_bias_current_low"},
274 	{"alarm_laser_output_power_high"},
275 	{"alarm_laser_output_power_low"},
276 	{"warn_transceiver_temp_high"},
277 	{"warn_transceiver_temp_low"},
278 	{"warn_laser_bias_current_high"},
279 	{"warn_laser_bias_current_low"},
280 	{"warn_laser_output_power_high"},
281 	{"warn_laser_output_power_low"},
282 	{"lro_aggregated_pkts"},
283 	{"lro_flush_both_count"},
284 	{"lro_out_of_sequence_pkts"},
285 	{"lro_flush_due_to_max_pkts"},
286 	{"lro_avg_aggr_pkts"},
287 	{"mem_alloc_fail_cnt"},
288 	{"pci_map_fail_cnt"},
289 	{"watchdog_timer_cnt"},
290 	{"mem_allocated"},
291 	{"mem_freed"},
292 	{"link_up_cnt"},
293 	{"link_down_cnt"},
294 	{"link_up_time"},
295 	{"link_down_time"},
296 	{"tx_tcode_buf_abort_cnt"},
297 	{"tx_tcode_desc_abort_cnt"},
298 	{"tx_tcode_parity_err_cnt"},
299 	{"tx_tcode_link_loss_cnt"},
300 	{"tx_tcode_list_proc_err_cnt"},
301 	{"rx_tcode_parity_err_cnt"},
302 	{"rx_tcode_abort_cnt"},
303 	{"rx_tcode_parity_abort_cnt"},
304 	{"rx_tcode_rda_fail_cnt"},
305 	{"rx_tcode_unkn_prot_cnt"},
306 	{"rx_tcode_fcs_err_cnt"},
307 	{"rx_tcode_buf_size_err_cnt"},
308 	{"rx_tcode_rxd_corrupt_cnt"},
309 	{"rx_tcode_unkn_err_cnt"},
310 	{"tda_err_cnt"},
311 	{"pfc_err_cnt"},
312 	{"pcc_err_cnt"},
313 	{"tti_err_cnt"},
314 	{"tpa_err_cnt"},
315 	{"sm_err_cnt"},
316 	{"lso_err_cnt"},
317 	{"mac_tmac_err_cnt"},
318 	{"mac_rmac_err_cnt"},
319 	{"xgxs_txgxs_err_cnt"},
320 	{"xgxs_rxgxs_err_cnt"},
321 	{"rc_err_cnt"},
322 	{"prc_pcix_err_cnt"},
323 	{"rpa_err_cnt"},
324 	{"rda_err_cnt"},
325 	{"rti_err_cnt"},
326 	{"mc_err_cnt"}
327 };
328 
329 #define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
330 #define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
331 #define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)
332 
333 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
334 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
335 
336 #define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
337 #define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
338 
339 #define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
340 #define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
341 
342 #define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
343 	init_timer(&timer);				\
344 	timer.function = handle;			\
345 	timer.data = (unsigned long)arg;		\
346 	mod_timer(&timer, (jiffies + exp))		\
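
/*
 * A hypothetical invocation of the macro above (names illustrative):
 * arm a half-second periodic timer whose handler receives the device
 * pointer:
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 */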
347 
348 /* copy mac addr to def_mac_addr array */
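/*
 * For example, mac_addr = 0x001122334455ULL yields the address
 * 00:11:22:33:44:55 - the most significant byte of the 48-bit value
 * becomes octet 0 (mac_addr[0]) and the least significant becomes
 * octet 5 (mac_addr[5]).
 */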
349 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
350 {
351 	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
352 	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
353 	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
354 	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
355 	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
356 	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
357 }
358 
359 /*
360  * Constants to be programmed into the Xena's registers, to configure
361  * the XAUI.
362  */
363 
364 #define	END_SIGN	0x0
365 static const u64 herc_act_dtx_cfg[] = {
366 	/* Set address */
367 	0x8000051536750000ULL, 0x80000515367500E0ULL,
368 	/* Write data */
369 	0x8000051536750004ULL, 0x80000515367500E4ULL,
370 	/* Set address */
371 	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
372 	/* Write data */
373 	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
374 	/* Set address */
375 	0x801205150D440000ULL, 0x801205150D4400E0ULL,
376 	/* Write data */
377 	0x801205150D440004ULL, 0x801205150D4400E4ULL,
378 	/* Set address */
379 	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
380 	/* Write data */
381 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
382 	/* Done */
383 	END_SIGN
384 };
385 
386 static const u64 xena_dtx_cfg[] = {
387 	/* Set address */
388 	0x8000051500000000ULL, 0x80000515000000E0ULL,
389 	/* Write data */
390 	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
391 	/* Set address */
392 	0x8001051500000000ULL, 0x80010515000000E0ULL,
393 	/* Write data */
394 	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
395 	/* Set address */
396 	0x8002051500000000ULL, 0x80020515000000E0ULL,
397 	/* Write data */
398 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
399 	END_SIGN
400 };
401 
402 /*
403  * Constants for Fixing the MacAddress problem seen mostly on
404  * Alpha machines.
405  */
406 static const u64 fix_mac[] = {
407 	0x0060000000000000ULL, 0x0060600000000000ULL,
408 	0x0040600000000000ULL, 0x0000600000000000ULL,
409 	0x0020600000000000ULL, 0x0060600000000000ULL,
410 	0x0020600000000000ULL, 0x0060600000000000ULL,
411 	0x0020600000000000ULL, 0x0060600000000000ULL,
412 	0x0020600000000000ULL, 0x0060600000000000ULL,
413 	0x0020600000000000ULL, 0x0060600000000000ULL,
414 	0x0020600000000000ULL, 0x0060600000000000ULL,
415 	0x0020600000000000ULL, 0x0060600000000000ULL,
416 	0x0020600000000000ULL, 0x0060600000000000ULL,
417 	0x0020600000000000ULL, 0x0060600000000000ULL,
418 	0x0020600000000000ULL, 0x0060600000000000ULL,
419 	0x0020600000000000ULL, 0x0000600000000000ULL,
420 	0x0040600000000000ULL, 0x0060600000000000ULL,
421 	END_SIGN
422 };
423 
424 MODULE_LICENSE("GPL");
425 MODULE_VERSION(DRV_VERSION);
426 
427 
428 /* Module Loadable parameters. */
429 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
430 S2IO_PARM_INT(rx_ring_num, 1);
431 S2IO_PARM_INT(multiq, 0);
432 S2IO_PARM_INT(rx_ring_mode, 1);
433 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
434 S2IO_PARM_INT(rmac_pause_time, 0x100);
435 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
436 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
437 S2IO_PARM_INT(shared_splits, 0);
438 S2IO_PARM_INT(tmac_util_period, 5);
439 S2IO_PARM_INT(rmac_util_period, 5);
440 S2IO_PARM_INT(l3l4hdr_size, 128);
441 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
442 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
443 /* Frequency of Rx desc syncs expressed as power of 2 */
444 S2IO_PARM_INT(rxsync_frequency, 3);
445 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
446 S2IO_PARM_INT(intr_type, 2);
447 /* Large receive offload feature */
448 
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit the max IP pkt size (64K).
 */
452 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
453 S2IO_PARM_INT(indicate_max_pkts, 0);
454 
455 S2IO_PARM_INT(napi, 1);
456 S2IO_PARM_INT(ufo, 0);
457 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
458 
459 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
460 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
461 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
462 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
463 static unsigned int rts_frm_len[MAX_RX_RINGS] =
464 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
465 
466 module_param_array(tx_fifo_len, uint, NULL, 0);
467 module_param_array(rx_ring_sz, uint, NULL, 0);
468 module_param_array(rts_frm_len, uint, NULL, 0);
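
/*
 * Array parameters take comma-separated values on the module command
 * line, e.g. (illustrative values):
 *
 *	modprobe s2io tx_fifo_len=1024,512 rx_ring_sz=128,128
 */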
469 
470 /*
471  * S2IO device table.
472  * This table lists all the devices that this driver supports.
473  */
474 static const struct pci_device_id s2io_tbl[] = {
475 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
476 	 PCI_ANY_ID, PCI_ANY_ID},
477 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
478 	 PCI_ANY_ID, PCI_ANY_ID},
479 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
480 	 PCI_ANY_ID, PCI_ANY_ID},
481 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
482 	 PCI_ANY_ID, PCI_ANY_ID},
483 	{0,}
484 };
485 
486 MODULE_DEVICE_TABLE(pci, s2io_tbl);
487 
488 static const struct pci_error_handlers s2io_err_handler = {
489 	.error_detected = s2io_io_error_detected,
490 	.slot_reset = s2io_io_slot_reset,
491 	.resume = s2io_io_resume,
492 };
493 
494 static struct pci_driver s2io_driver = {
495 	.name = "S2IO",
496 	.id_table = s2io_tbl,
497 	.probe = s2io_init_nic,
498 	.remove = s2io_rem_nic,
499 	.err_handler = &s2io_err_handler,
500 };
501 
/* A helper macro used by both the init and free shared_mem functions. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
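
/*
 * TXD_MEM_PAGE_CNT() is a ceiling division: for example, 100 TxD lists
 * at 30 lists per page need (100 + 30 - 1) / 30 = 4 pages.
 */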
504 
505 /* netqueue manipulation helper functions */
506 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
507 {
508 	if (!sp->config.multiq) {
509 		int i;
510 
511 		for (i = 0; i < sp->config.tx_fifo_num; i++)
512 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
513 	}
514 	netif_tx_stop_all_queues(sp->dev);
515 }
516 
517 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
518 {
519 	if (!sp->config.multiq)
520 		sp->mac_control.fifos[fifo_no].queue_state =
521 			FIFO_QUEUE_STOP;
522 
523 	netif_tx_stop_all_queues(sp->dev);
524 }
525 
526 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
527 {
528 	if (!sp->config.multiq) {
529 		int i;
530 
531 		for (i = 0; i < sp->config.tx_fifo_num; i++)
532 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
533 	}
534 	netif_tx_start_all_queues(sp->dev);
535 }
536 
537 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
538 {
539 	if (!sp->config.multiq) {
540 		int i;
541 
542 		for (i = 0; i < sp->config.tx_fifo_num; i++)
543 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
544 	}
545 	netif_tx_wake_all_queues(sp->dev);
546 }
547 
548 static inline void s2io_wake_tx_queue(
549 	struct fifo_info *fifo, int cnt, u8 multiq)
550 {
551 
552 	if (multiq) {
553 		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
554 			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
555 	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
556 		if (netif_queue_stopped(fifo->dev)) {
557 			fifo->queue_state = FIFO_QUEUE_START;
558 			netif_wake_queue(fifo->dev);
559 		}
560 	}
561 }
562 
563 /**
564  * init_shared_mem - Allocation and Initialization of Memory
565  * @nic: Device private variable.
566  * Description: The function allocates all the memory areas shared
567  * between the NIC and the driver. This includes Tx descriptors,
568  * Rx descriptors and the statistics block.
569  */
570 
571 static int init_shared_mem(struct s2io_nic *nic)
572 {
573 	u32 size;
574 	void *tmp_v_addr, *tmp_v_addr_next;
575 	dma_addr_t tmp_p_addr, tmp_p_addr_next;
576 	struct RxD_block *pre_rxd_blk = NULL;
577 	int i, j, blk_cnt;
578 	int lst_size, lst_per_page;
579 	struct net_device *dev = nic->dev;
580 	unsigned long tmp;
581 	struct buffAdd *ba;
582 	struct config_param *config = &nic->config;
583 	struct mac_info *mac_control = &nic->mac_control;
584 	unsigned long long mem_allocated = 0;
585 
586 	/* Allocation and initialization of TXDLs in FIFOs */
587 	size = 0;
588 	for (i = 0; i < config->tx_fifo_num; i++) {
589 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
590 
591 		size += tx_cfg->fifo_len;
592 	}
593 	if (size > MAX_AVAILABLE_TXDS) {
594 		DBG_PRINT(ERR_DBG,
595 			  "Too many TxDs requested: %d, max supported: %d\n",
596 			  size, MAX_AVAILABLE_TXDS);
597 		return -EINVAL;
598 	}
599 
600 	size = 0;
601 	for (i = 0; i < config->tx_fifo_num; i++) {
602 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
603 
604 		size = tx_cfg->fifo_len;
605 		/*
606 		 * Legal values are from 2 to 8192
607 		 */
608 		if (size < 2) {
609 			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
610 				  "Valid lengths are 2 through 8192\n",
611 				  i, size);
612 			return -EINVAL;
613 		}
614 	}
615 
616 	lst_size = (sizeof(struct TxD) * config->max_txds);
617 	lst_per_page = PAGE_SIZE / lst_size;
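	/*
	 * Worked example with assumed numbers: if lst_size came to 512
	 * bytes with a 4096-byte PAGE_SIZE, lst_per_page would be 8.
	 */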
618 
619 	for (i = 0; i < config->tx_fifo_num; i++) {
620 		struct fifo_info *fifo = &mac_control->fifos[i];
621 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
622 		int fifo_len = tx_cfg->fifo_len;
623 		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
624 
625 		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
626 		if (!fifo->list_info) {
627 			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
628 			return -ENOMEM;
629 		}
630 		mem_allocated += list_holder_size;
631 	}
632 	for (i = 0; i < config->tx_fifo_num; i++) {
633 		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
634 						lst_per_page);
635 		struct fifo_info *fifo = &mac_control->fifos[i];
636 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
637 
638 		fifo->tx_curr_put_info.offset = 0;
639 		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
640 		fifo->tx_curr_get_info.offset = 0;
641 		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
642 		fifo->fifo_no = i;
643 		fifo->nic = nic;
644 		fifo->max_txds = MAX_SKB_FRAGS + 2;
645 		fifo->dev = dev;
646 
647 		for (j = 0; j < page_num; j++) {
648 			int k = 0;
649 			dma_addr_t tmp_p;
650 			void *tmp_v;
651 			tmp_v = pci_alloc_consistent(nic->pdev,
652 						     PAGE_SIZE, &tmp_p);
653 			if (!tmp_v) {
654 				DBG_PRINT(INFO_DBG,
655 					  "pci_alloc_consistent failed for TxDL\n");
656 				return -ENOMEM;
657 			}
			/* If we got a zero DMA address (can happen on
			 * certain platforms like PPC), reallocate.
			 * Store the virtual address of the page we don't
			 * want, so it can be freed later.
			 */
663 			if (!tmp_p) {
664 				mac_control->zerodma_virt_addr = tmp_v;
665 				DBG_PRINT(INIT_DBG,
666 					  "%s: Zero DMA address for TxDL. "
667 					  "Virtual address %p\n",
668 					  dev->name, tmp_v);
669 				tmp_v = pci_alloc_consistent(nic->pdev,
670 							     PAGE_SIZE, &tmp_p);
671 				if (!tmp_v) {
672 					DBG_PRINT(INFO_DBG,
673 						  "pci_alloc_consistent failed for TxDL\n");
674 					return -ENOMEM;
675 				}
676 				mem_allocated += PAGE_SIZE;
677 			}
678 			while (k < lst_per_page) {
679 				int l = (j * lst_per_page) + k;
680 				if (l == tx_cfg->fifo_len)
681 					break;
682 				fifo->list_info[l].list_virt_addr =
683 					tmp_v + (k * lst_size);
684 				fifo->list_info[l].list_phy_addr =
685 					tmp_p + (k * lst_size);
686 				k++;
687 			}
688 		}
689 	}
690 
691 	for (i = 0; i < config->tx_fifo_num; i++) {
692 		struct fifo_info *fifo = &mac_control->fifos[i];
693 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
694 
695 		size = tx_cfg->fifo_len;
696 		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
697 		if (!fifo->ufo_in_band_v)
698 			return -ENOMEM;
699 		mem_allocated += (size * sizeof(u64));
700 	}
701 
702 	/* Allocation and initialization of RXDs in Rings */
703 	size = 0;
704 	for (i = 0; i < config->rx_ring_num; i++) {
705 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
706 		struct ring_info *ring = &mac_control->rings[i];
707 
708 		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
709 			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
710 				  "multiple of RxDs per Block\n",
711 				  dev->name, i);
712 			return FAILURE;
713 		}
714 		size += rx_cfg->num_rxd;
715 		ring->block_count = rx_cfg->num_rxd /
716 			(rxd_count[nic->rxd_mode] + 1);
717 		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
718 	}
719 	if (nic->rxd_mode == RXD_MODE_1)
720 		size = (size * (sizeof(struct RxD1)));
721 	else
722 		size = (size * (sizeof(struct RxD3)));
723 
724 	for (i = 0; i < config->rx_ring_num; i++) {
725 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
726 		struct ring_info *ring = &mac_control->rings[i];
727 
728 		ring->rx_curr_get_info.block_index = 0;
729 		ring->rx_curr_get_info.offset = 0;
730 		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
731 		ring->rx_curr_put_info.block_index = 0;
732 		ring->rx_curr_put_info.offset = 0;
733 		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
734 		ring->nic = nic;
735 		ring->ring_no = i;
736 
737 		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
738 		/*  Allocating all the Rx blocks */
739 		for (j = 0; j < blk_cnt; j++) {
740 			struct rx_block_info *rx_blocks;
741 			int l;
742 
743 			rx_blocks = &ring->rx_blocks[j];
744 			size = SIZE_OF_BLOCK;	/* size is always page size */
745 			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
746 							  &tmp_p_addr);
747 			if (tmp_v_addr == NULL) {
748 				/*
749 				 * In case of failure, free_shared_mem()
750 				 * is called, which should free any
751 				 * memory that was alloced till the
752 				 * failure happened.
753 				 */
754 				rx_blocks->block_virt_addr = tmp_v_addr;
755 				return -ENOMEM;
756 			}
757 			mem_allocated += size;
758 			memset(tmp_v_addr, 0, size);
759 
760 			size = sizeof(struct rxd_info) *
761 				rxd_count[nic->rxd_mode];
762 			rx_blocks->block_virt_addr = tmp_v_addr;
763 			rx_blocks->block_dma_addr = tmp_p_addr;
764 			rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
765 			if (!rx_blocks->rxds)
766 				return -ENOMEM;
767 			mem_allocated += size;
768 			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
769 				rx_blocks->rxds[l].virt_addr =
770 					rx_blocks->block_virt_addr +
771 					(rxd_size[nic->rxd_mode] * l);
772 				rx_blocks->rxds[l].dma_addr =
773 					rx_blocks->block_dma_addr +
774 					(rxd_size[nic->rxd_mode] * l);
775 			}
776 		}
777 		/* Interlinking all Rx Blocks */
778 		for (j = 0; j < blk_cnt; j++) {
779 			int next = (j + 1) % blk_cnt;
780 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
781 			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
782 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
783 			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
784 
785 			pre_rxd_blk = tmp_v_addr;
786 			pre_rxd_blk->reserved_2_pNext_RxD_block =
787 				(unsigned long)tmp_v_addr_next;
788 			pre_rxd_blk->pNext_RxD_Blk_physical =
789 				(u64)tmp_p_addr_next;
790 		}
791 	}
792 	if (nic->rxd_mode == RXD_MODE_3B) {
793 		/*
794 		 * Allocation of Storages for buffer addresses in 2BUFF mode
795 		 * and the buffers as well.
796 		 */
797 		for (i = 0; i < config->rx_ring_num; i++) {
798 			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
799 			struct ring_info *ring = &mac_control->rings[i];
800 
801 			blk_cnt = rx_cfg->num_rxd /
802 				(rxd_count[nic->rxd_mode] + 1);
803 			size = sizeof(struct buffAdd *) * blk_cnt;
804 			ring->ba = kmalloc(size, GFP_KERNEL);
805 			if (!ring->ba)
806 				return -ENOMEM;
807 			mem_allocated += size;
808 			for (j = 0; j < blk_cnt; j++) {
809 				int k = 0;
810 
811 				size = sizeof(struct buffAdd) *
812 					(rxd_count[nic->rxd_mode] + 1);
813 				ring->ba[j] = kmalloc(size, GFP_KERNEL);
814 				if (!ring->ba[j])
815 					return -ENOMEM;
816 				mem_allocated += size;
817 				while (k != rxd_count[nic->rxd_mode]) {
818 					ba = &ring->ba[j][k];
819 					size = BUF0_LEN + ALIGN_SIZE;
820 					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
821 					if (!ba->ba_0_org)
822 						return -ENOMEM;
823 					mem_allocated += size;
824 					tmp = (unsigned long)ba->ba_0_org;
825 					tmp += ALIGN_SIZE;
826 					tmp &= ~((unsigned long)ALIGN_SIZE);
827 					ba->ba_0 = (void *)tmp;
828 
829 					size = BUF1_LEN + ALIGN_SIZE;
830 					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
831 					if (!ba->ba_1_org)
832 						return -ENOMEM;
833 					mem_allocated += size;
834 					tmp = (unsigned long)ba->ba_1_org;
835 					tmp += ALIGN_SIZE;
836 					tmp &= ~((unsigned long)ALIGN_SIZE);
837 					ba->ba_1 = (void *)tmp;
838 					k++;
839 				}
840 			}
841 		}
842 	}
843 
844 	/* Allocation and initialization of Statistics block */
845 	size = sizeof(struct stat_block);
846 	mac_control->stats_mem =
847 		pci_alloc_consistent(nic->pdev, size,
848 				     &mac_control->stats_mem_phy);
849 
850 	if (!mac_control->stats_mem) {
851 		/*
852 		 * In case of failure, free_shared_mem() is called, which
853 		 * should free any memory that was alloced till the
854 		 * failure happened.
855 		 */
856 		return -ENOMEM;
857 	}
858 	mem_allocated += size;
859 	mac_control->stats_mem_sz = size;
860 
861 	tmp_v_addr = mac_control->stats_mem;
862 	mac_control->stats_info = tmp_v_addr;
863 	memset(tmp_v_addr, 0, size);
864 	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
865 		dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
866 	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
867 	return SUCCESS;
868 }
869 
870 /**
871  * free_shared_mem - Free the allocated Memory
872  * @nic:  Device private variable.
 * Description: This function frees all memory allocated by the
 * init_shared_mem() function and returns it to the kernel.
875  */
876 
877 static void free_shared_mem(struct s2io_nic *nic)
878 {
879 	int i, j, blk_cnt, size;
880 	void *tmp_v_addr;
881 	dma_addr_t tmp_p_addr;
882 	int lst_size, lst_per_page;
883 	struct net_device *dev;
884 	int page_num = 0;
885 	struct config_param *config;
886 	struct mac_info *mac_control;
887 	struct stat_block *stats;
888 	struct swStat *swstats;
889 
890 	if (!nic)
891 		return;
892 
893 	dev = nic->dev;
894 
895 	config = &nic->config;
896 	mac_control = &nic->mac_control;
897 	stats = mac_control->stats_info;
898 	swstats = &stats->sw_stat;
899 
900 	lst_size = sizeof(struct TxD) * config->max_txds;
901 	lst_per_page = PAGE_SIZE / lst_size;
902 
903 	for (i = 0; i < config->tx_fifo_num; i++) {
904 		struct fifo_info *fifo = &mac_control->fifos[i];
905 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
906 
907 		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
908 		for (j = 0; j < page_num; j++) {
909 			int mem_blks = (j * lst_per_page);
910 			struct list_info_hold *fli;
911 
912 			if (!fifo->list_info)
913 				return;
914 
915 			fli = &fifo->list_info[mem_blks];
916 			if (!fli->list_virt_addr)
917 				break;
918 			pci_free_consistent(nic->pdev, PAGE_SIZE,
919 					    fli->list_virt_addr,
920 					    fli->list_phy_addr);
921 			swstats->mem_freed += PAGE_SIZE;
922 		}
923 		/* If we got a zero DMA address during allocation,
924 		 * free the page now
925 		 */
926 		if (mac_control->zerodma_virt_addr) {
927 			pci_free_consistent(nic->pdev, PAGE_SIZE,
928 					    mac_control->zerodma_virt_addr,
929 					    (dma_addr_t)0);
930 			DBG_PRINT(INIT_DBG,
931 				  "%s: Freeing TxDL with zero DMA address. "
932 				  "Virtual address %p\n",
933 				  dev->name, mac_control->zerodma_virt_addr);
934 			swstats->mem_freed += PAGE_SIZE;
935 		}
936 		kfree(fifo->list_info);
937 		swstats->mem_freed += tx_cfg->fifo_len *
938 			sizeof(struct list_info_hold);
939 	}
940 
941 	size = SIZE_OF_BLOCK;
942 	for (i = 0; i < config->rx_ring_num; i++) {
943 		struct ring_info *ring = &mac_control->rings[i];
944 
945 		blk_cnt = ring->block_count;
946 		for (j = 0; j < blk_cnt; j++) {
947 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
948 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
949 			if (tmp_v_addr == NULL)
950 				break;
951 			pci_free_consistent(nic->pdev, size,
952 					    tmp_v_addr, tmp_p_addr);
953 			swstats->mem_freed += size;
954 			kfree(ring->rx_blocks[j].rxds);
955 			swstats->mem_freed += sizeof(struct rxd_info) *
956 				rxd_count[nic->rxd_mode];
957 		}
958 	}
959 
960 	if (nic->rxd_mode == RXD_MODE_3B) {
961 		/* Freeing buffer storage addresses in 2BUFF mode. */
962 		for (i = 0; i < config->rx_ring_num; i++) {
963 			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
964 			struct ring_info *ring = &mac_control->rings[i];
965 
966 			blk_cnt = rx_cfg->num_rxd /
967 				(rxd_count[nic->rxd_mode] + 1);
968 			for (j = 0; j < blk_cnt; j++) {
969 				int k = 0;
970 				if (!ring->ba[j])
971 					continue;
972 				while (k != rxd_count[nic->rxd_mode]) {
973 					struct buffAdd *ba = &ring->ba[j][k];
974 					kfree(ba->ba_0_org);
975 					swstats->mem_freed +=
976 						BUF0_LEN + ALIGN_SIZE;
977 					kfree(ba->ba_1_org);
978 					swstats->mem_freed +=
979 						BUF1_LEN + ALIGN_SIZE;
980 					k++;
981 				}
982 				kfree(ring->ba[j]);
983 				swstats->mem_freed += sizeof(struct buffAdd) *
984 					(rxd_count[nic->rxd_mode] + 1);
985 			}
986 			kfree(ring->ba);
987 			swstats->mem_freed += sizeof(struct buffAdd *) *
988 				blk_cnt;
989 		}
990 	}
991 
992 	for (i = 0; i < nic->config.tx_fifo_num; i++) {
993 		struct fifo_info *fifo = &mac_control->fifos[i];
994 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
995 
996 		if (fifo->ufo_in_band_v) {
997 			swstats->mem_freed += tx_cfg->fifo_len *
998 				sizeof(u64);
999 			kfree(fifo->ufo_in_band_v);
1000 		}
1001 	}
1002 
1003 	if (mac_control->stats_mem) {
1004 		swstats->mem_freed += mac_control->stats_mem_sz;
1005 		pci_free_consistent(nic->pdev,
1006 				    mac_control->stats_mem_sz,
1007 				    mac_control->stats_mem,
1008 				    mac_control->stats_mem_phy);
1009 	}
1010 }
1011 
1012 /**
1013  * s2io_verify_pci_mode -
1014  */
1015 
1016 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1017 {
1018 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1019 	register u64 val64 = 0;
1020 	int     mode;
1021 
1022 	val64 = readq(&bar0->pci_mode);
1023 	mode = (u8)GET_PCI_MODE(val64);
1024 
1025 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1026 		return -1;      /* Unknown PCI mode */
1027 	return mode;
1028 }
1029 
1030 #define NEC_VENID   0x1033
1031 #define NEC_DEVID   0x0125
1032 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1033 {
1034 	struct pci_dev *tdev = NULL;
1035 	for_each_pci_dev(tdev) {
1036 		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1037 			if (tdev->bus == s2io_pdev->bus->parent) {
1038 				pci_dev_put(tdev);
1039 				return 1;
1040 			}
1041 		}
1042 	}
1043 	return 0;
1044 }
1045 
1046 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
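/*
 * Indexed by the mode value returned by GET_PCI_MODE();
 * s2io_print_pci_mode() below uses it to record the bus speed in MHz.
 */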
1047 /**
1048  * s2io_print_pci_mode -
1049  */
1050 static int s2io_print_pci_mode(struct s2io_nic *nic)
1051 {
1052 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1053 	register u64 val64 = 0;
1054 	int	mode;
1055 	struct config_param *config = &nic->config;
1056 	const char *pcimode;
1057 
1058 	val64 = readq(&bar0->pci_mode);
1059 	mode = (u8)GET_PCI_MODE(val64);
1060 
1061 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1062 		return -1;	/* Unknown PCI mode */
1063 
1064 	config->bus_speed = bus_speed[mode];
1065 
1066 	if (s2io_on_nec_bridge(nic->pdev)) {
1067 		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1068 			  nic->dev->name);
1069 		return mode;
1070 	}
1071 
1072 	switch (mode) {
1073 	case PCI_MODE_PCI_33:
1074 		pcimode = "33MHz PCI bus";
1075 		break;
1076 	case PCI_MODE_PCI_66:
1077 		pcimode = "66MHz PCI bus";
1078 		break;
1079 	case PCI_MODE_PCIX_M1_66:
1080 		pcimode = "66MHz PCIX(M1) bus";
1081 		break;
1082 	case PCI_MODE_PCIX_M1_100:
1083 		pcimode = "100MHz PCIX(M1) bus";
1084 		break;
1085 	case PCI_MODE_PCIX_M1_133:
1086 		pcimode = "133MHz PCIX(M1) bus";
1087 		break;
1088 	case PCI_MODE_PCIX_M2_66:
1089 		pcimode = "133MHz PCIX(M2) bus";
1090 		break;
1091 	case PCI_MODE_PCIX_M2_100:
1092 		pcimode = "200MHz PCIX(M2) bus";
1093 		break;
1094 	case PCI_MODE_PCIX_M2_133:
1095 		pcimode = "266MHz PCIX(M2) bus";
1096 		break;
1097 	default:
1098 		pcimode = "unsupported bus!";
1099 		mode = -1;
1100 	}
1101 
1102 	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1103 		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1104 
1105 	return mode;
1106 }
1107 
1108 /**
 *  init_tti - Initialization of the transmit traffic interrupt scheme
1110  *  @nic: device private variable
1111  *  @link: link status (UP/DOWN) used to enable/disable continuous
1112  *  transmit interrupts
1113  *  Description: The function configures transmit traffic interrupts
1114  *  Return Value:  SUCCESS on success and
1115  *  '-1' on failure
1116  */
1117 
1118 static int init_tti(struct s2io_nic *nic, int link)
1119 {
1120 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1121 	register u64 val64 = 0;
1122 	int i;
1123 	struct config_param *config = &nic->config;
1124 
1125 	for (i = 0; i < config->tx_fifo_num; i++) {
1126 		/*
1127 		 * TTI Initialization. Default Tx timer gets us about
1128 		 * 250 interrupts per sec. Continuous interrupts are enabled
1129 		 * by default.
1130 		 */
1131 		if (nic->device_type == XFRAME_II_DEVICE) {
1132 			int count = (nic->config.bus_speed * 125)/2;
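			/*
			 * Worked example (assumed bus speed): at 266 MHz,
			 * count = (266 * 125) / 2 = 16625.
			 */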
1133 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1134 		} else
1135 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1136 
1137 		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1138 			TTI_DATA1_MEM_TX_URNG_B(0x10) |
1139 			TTI_DATA1_MEM_TX_URNG_C(0x30) |
1140 			TTI_DATA1_MEM_TX_TIMER_AC_EN;
1141 		if (i == 0)
1142 			if (use_continuous_tx_intrs && (link == LINK_UP))
1143 				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1144 		writeq(val64, &bar0->tti_data1_mem);
1145 
1146 		if (nic->config.intr_type == MSI_X) {
1147 			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1148 				TTI_DATA2_MEM_TX_UFC_B(0x100) |
1149 				TTI_DATA2_MEM_TX_UFC_C(0x200) |
1150 				TTI_DATA2_MEM_TX_UFC_D(0x300);
1151 		} else {
1152 			if ((nic->config.tx_steering_type ==
1153 			     TX_DEFAULT_STEERING) &&
1154 			    (config->tx_fifo_num > 1) &&
1155 			    (i >= nic->udp_fifo_idx) &&
1156 			    (i < (nic->udp_fifo_idx +
1157 				  nic->total_udp_fifos)))
1158 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1159 					TTI_DATA2_MEM_TX_UFC_B(0x80) |
1160 					TTI_DATA2_MEM_TX_UFC_C(0x100) |
1161 					TTI_DATA2_MEM_TX_UFC_D(0x120);
1162 			else
1163 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1164 					TTI_DATA2_MEM_TX_UFC_B(0x20) |
1165 					TTI_DATA2_MEM_TX_UFC_C(0x40) |
1166 					TTI_DATA2_MEM_TX_UFC_D(0x80);
1167 		}
1168 
1169 		writeq(val64, &bar0->tti_data2_mem);
1170 
1171 		val64 = TTI_CMD_MEM_WE |
1172 			TTI_CMD_MEM_STROBE_NEW_CMD |
1173 			TTI_CMD_MEM_OFFSET(i);
1174 		writeq(val64, &bar0->tti_command_mem);
1175 
1176 		if (wait_for_cmd_complete(&bar0->tti_command_mem,
1177 					  TTI_CMD_MEM_STROBE_NEW_CMD,
1178 					  S2IO_BIT_RESET) != SUCCESS)
1179 			return FAILURE;
1180 	}
1181 
1182 	return SUCCESS;
1183 }
1184 
1185 /**
1186  *  init_nic - Initialization of hardware
1187  *  @nic: device private variable
1188  *  Description: The function sequentially configures every block
 *  of the H/W from its reset values.
1190  *  Return Value:  SUCCESS on success and
1191  *  '-1' on failure (endian settings incorrect).
1192  */
1193 
1194 static int init_nic(struct s2io_nic *nic)
1195 {
1196 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1197 	struct net_device *dev = nic->dev;
1198 	register u64 val64 = 0;
1199 	void __iomem *add;
1200 	u32 time;
1201 	int i, j;
1202 	int dtx_cnt = 0;
1203 	unsigned long long mem_share;
1204 	int mem_size;
1205 	struct config_param *config = &nic->config;
1206 	struct mac_info *mac_control = &nic->mac_control;
1207 
	/* Set the swapper control on the card */
1209 	if (s2io_set_swapper(nic)) {
1210 		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1211 		return -EIO;
1212 	}
1213 
1214 	/*
1215 	 * Herc requires EOI to be removed from reset before XGXS, so..
1216 	 */
1217 	if (nic->device_type & XFRAME_II_DEVICE) {
1218 		val64 = 0xA500000000ULL;
1219 		writeq(val64, &bar0->sw_reset);
1220 		msleep(500);
1221 		val64 = readq(&bar0->sw_reset);
1222 	}
1223 
1224 	/* Remove XGXS from reset state */
1225 	val64 = 0;
1226 	writeq(val64, &bar0->sw_reset);
1227 	msleep(500);
1228 	val64 = readq(&bar0->sw_reset);
1229 
	/* Ensure that it's safe to access the registers by checking that
	 * the RIC_RUNNING bit is reset. The check is valid only for
	 * Xframe II.
	 */
1233 	if (nic->device_type == XFRAME_II_DEVICE) {
1234 		for (i = 0; i < 50; i++) {
1235 			val64 = readq(&bar0->adapter_status);
1236 			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1237 				break;
1238 			msleep(10);
1239 		}
1240 		if (i == 50)
1241 			return -ENODEV;
1242 	}
1243 
1244 	/*  Enable Receiving broadcasts */
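	/*
	 * mac_cfg is a key-protected register: each 32-bit half write
	 * below is preceded by writing RMAC_CFG_KEY(0x4C0D) to
	 * rmac_cfg_key to unlock it.
	 */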
1245 	add = &bar0->mac_cfg;
1246 	val64 = readq(&bar0->mac_cfg);
1247 	val64 |= MAC_RMAC_BCAST_ENABLE;
1248 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1249 	writel((u32)val64, add);
1250 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1251 	writel((u32) (val64 >> 32), (add + 4));
1252 
1253 	/* Read registers in all blocks */
1254 	val64 = readq(&bar0->mac_int_mask);
1255 	val64 = readq(&bar0->mc_int_mask);
1256 	val64 = readq(&bar0->xgxs_int_mask);
1257 
1258 	/*  Set MTU */
1259 	val64 = dev->mtu;
1260 	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1261 
1262 	if (nic->device_type & XFRAME_II_DEVICE) {
1263 		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1264 			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1265 					  &bar0->dtx_control, UF);
1266 			if (dtx_cnt & 0x1)
1267 				msleep(1); /* Necessary!! */
1268 			dtx_cnt++;
1269 		}
1270 	} else {
1271 		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1272 			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1273 					  &bar0->dtx_control, UF);
1274 			val64 = readq(&bar0->dtx_control);
1275 			dtx_cnt++;
1276 		}
1277 	}
1278 
1279 	/*  Tx DMA Initialization */
1280 	val64 = 0;
1281 	writeq(val64, &bar0->tx_fifo_partition_0);
1282 	writeq(val64, &bar0->tx_fifo_partition_1);
1283 	writeq(val64, &bar0->tx_fifo_partition_2);
1284 	writeq(val64, &bar0->tx_fifo_partition_3);
1285 
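	/*
	 * Each 64-bit tx_fifo_partition register describes two FIFOs: in
	 * each 32-bit half, vBIT() below places the FIFO length (13 bits
	 * at bit offset 19) and its priority (3 bits at bit offset 5).
	 */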
1286 	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1287 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1288 
1289 		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1290 			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1291 
1292 		if (i == (config->tx_fifo_num - 1)) {
1293 			if (i % 2 == 0)
1294 				i++;
1295 		}
1296 
1297 		switch (i) {
1298 		case 1:
1299 			writeq(val64, &bar0->tx_fifo_partition_0);
1300 			val64 = 0;
1301 			j = 0;
1302 			break;
1303 		case 3:
1304 			writeq(val64, &bar0->tx_fifo_partition_1);
1305 			val64 = 0;
1306 			j = 0;
1307 			break;
1308 		case 5:
1309 			writeq(val64, &bar0->tx_fifo_partition_2);
1310 			val64 = 0;
1311 			j = 0;
1312 			break;
1313 		case 7:
1314 			writeq(val64, &bar0->tx_fifo_partition_3);
1315 			val64 = 0;
1316 			j = 0;
1317 			break;
1318 		default:
1319 			j++;
1320 			break;
1321 		}
1322 	}
1323 
1324 	/*
1325 	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1326 	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1327 	 */
1328 	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1329 		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1330 
1331 	val64 = readq(&bar0->tx_fifo_partition_0);
1332 	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1333 		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1334 
1335 	/*
1336 	 * Initialization of Tx_PA_CONFIG register to ignore packet
1337 	 * integrity checking.
1338 	 */
1339 	val64 = readq(&bar0->tx_pa_cfg);
1340 	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1341 		TX_PA_CFG_IGNORE_SNAP_OUI |
1342 		TX_PA_CFG_IGNORE_LLC_CTRL |
1343 		TX_PA_CFG_IGNORE_L2_ERR;
1344 	writeq(val64, &bar0->tx_pa_cfg);
1345 
1346 	/* Rx DMA initialization. */
1347 	val64 = 0;
1348 	for (i = 0; i < config->rx_ring_num; i++) {
1349 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1350 
1351 		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1352 	}
1353 	writeq(val64, &bar0->rx_queue_priority);
1354 
1355 	/*
1356 	 * Allocating equal share of memory to all the
1357 	 * configured Rings.
1358 	 */
1359 	val64 = 0;
1360 	if (nic->device_type & XFRAME_II_DEVICE)
1361 		mem_size = 32;
1362 	else
1363 		mem_size = 64;
1364 
1365 	for (i = 0; i < config->rx_ring_num; i++) {
1366 		switch (i) {
1367 		case 0:
1368 			mem_share = (mem_size / config->rx_ring_num +
1369 				     mem_size % config->rx_ring_num);
1370 			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1371 			continue;
1372 		case 1:
1373 			mem_share = (mem_size / config->rx_ring_num);
1374 			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1375 			continue;
1376 		case 2:
1377 			mem_share = (mem_size / config->rx_ring_num);
1378 			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1379 			continue;
1380 		case 3:
1381 			mem_share = (mem_size / config->rx_ring_num);
1382 			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1383 			continue;
1384 		case 4:
1385 			mem_share = (mem_size / config->rx_ring_num);
1386 			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1387 			continue;
1388 		case 5:
1389 			mem_share = (mem_size / config->rx_ring_num);
1390 			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1391 			continue;
1392 		case 6:
1393 			mem_share = (mem_size / config->rx_ring_num);
1394 			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1395 			continue;
1396 		case 7:
1397 			mem_share = (mem_size / config->rx_ring_num);
1398 			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1399 			continue;
1400 		}
1401 	}
1402 	writeq(val64, &bar0->rx_queue_cfg);
1403 
1404 	/*
1405 	 * Filling Tx round robin registers
1406 	 * as per the number of FIFOs for equal scheduling priority
1407 	 */
1408 	switch (config->tx_fifo_num) {
1409 	case 1:
1410 		val64 = 0x0;
1411 		writeq(val64, &bar0->tx_w_round_robin_0);
1412 		writeq(val64, &bar0->tx_w_round_robin_1);
1413 		writeq(val64, &bar0->tx_w_round_robin_2);
1414 		writeq(val64, &bar0->tx_w_round_robin_3);
1415 		writeq(val64, &bar0->tx_w_round_robin_4);
1416 		break;
1417 	case 2:
1418 		val64 = 0x0001000100010001ULL;
1419 		writeq(val64, &bar0->tx_w_round_robin_0);
1420 		writeq(val64, &bar0->tx_w_round_robin_1);
1421 		writeq(val64, &bar0->tx_w_round_robin_2);
1422 		writeq(val64, &bar0->tx_w_round_robin_3);
1423 		val64 = 0x0001000100000000ULL;
1424 		writeq(val64, &bar0->tx_w_round_robin_4);
1425 		break;
1426 	case 3:
1427 		val64 = 0x0001020001020001ULL;
1428 		writeq(val64, &bar0->tx_w_round_robin_0);
1429 		val64 = 0x0200010200010200ULL;
1430 		writeq(val64, &bar0->tx_w_round_robin_1);
1431 		val64 = 0x0102000102000102ULL;
1432 		writeq(val64, &bar0->tx_w_round_robin_2);
1433 		val64 = 0x0001020001020001ULL;
1434 		writeq(val64, &bar0->tx_w_round_robin_3);
1435 		val64 = 0x0200010200000000ULL;
1436 		writeq(val64, &bar0->tx_w_round_robin_4);
1437 		break;
1438 	case 4:
1439 		val64 = 0x0001020300010203ULL;
1440 		writeq(val64, &bar0->tx_w_round_robin_0);
1441 		writeq(val64, &bar0->tx_w_round_robin_1);
1442 		writeq(val64, &bar0->tx_w_round_robin_2);
1443 		writeq(val64, &bar0->tx_w_round_robin_3);
1444 		val64 = 0x0001020300000000ULL;
1445 		writeq(val64, &bar0->tx_w_round_robin_4);
1446 		break;
1447 	case 5:
1448 		val64 = 0x0001020304000102ULL;
1449 		writeq(val64, &bar0->tx_w_round_robin_0);
1450 		val64 = 0x0304000102030400ULL;
1451 		writeq(val64, &bar0->tx_w_round_robin_1);
1452 		val64 = 0x0102030400010203ULL;
1453 		writeq(val64, &bar0->tx_w_round_robin_2);
1454 		val64 = 0x0400010203040001ULL;
1455 		writeq(val64, &bar0->tx_w_round_robin_3);
1456 		val64 = 0x0203040000000000ULL;
1457 		writeq(val64, &bar0->tx_w_round_robin_4);
1458 		break;
1459 	case 6:
1460 		val64 = 0x0001020304050001ULL;
1461 		writeq(val64, &bar0->tx_w_round_robin_0);
1462 		val64 = 0x0203040500010203ULL;
1463 		writeq(val64, &bar0->tx_w_round_robin_1);
1464 		val64 = 0x0405000102030405ULL;
1465 		writeq(val64, &bar0->tx_w_round_robin_2);
1466 		val64 = 0x0001020304050001ULL;
1467 		writeq(val64, &bar0->tx_w_round_robin_3);
1468 		val64 = 0x0203040500000000ULL;
1469 		writeq(val64, &bar0->tx_w_round_robin_4);
1470 		break;
1471 	case 7:
1472 		val64 = 0x0001020304050600ULL;
1473 		writeq(val64, &bar0->tx_w_round_robin_0);
1474 		val64 = 0x0102030405060001ULL;
1475 		writeq(val64, &bar0->tx_w_round_robin_1);
1476 		val64 = 0x0203040506000102ULL;
1477 		writeq(val64, &bar0->tx_w_round_robin_2);
1478 		val64 = 0x0304050600010203ULL;
1479 		writeq(val64, &bar0->tx_w_round_robin_3);
1480 		val64 = 0x0405060000000000ULL;
1481 		writeq(val64, &bar0->tx_w_round_robin_4);
1482 		break;
1483 	case 8:
1484 		val64 = 0x0001020304050607ULL;
1485 		writeq(val64, &bar0->tx_w_round_robin_0);
1486 		writeq(val64, &bar0->tx_w_round_robin_1);
1487 		writeq(val64, &bar0->tx_w_round_robin_2);
1488 		writeq(val64, &bar0->tx_w_round_robin_3);
1489 		val64 = 0x0001020300000000ULL;
1490 		writeq(val64, &bar0->tx_w_round_robin_4);
1491 		break;
1492 	}
1493 
1494 	/* Enable all configured Tx FIFO partitions */
1495 	val64 = readq(&bar0->tx_fifo_partition_0);
1496 	val64 |= (TX_FIFO_PARTITION_EN);
1497 	writeq(val64, &bar0->tx_fifo_partition_0);
1498 
1499 	/* Filling the Rx round robin registers as per the
1500 	 * number of Rings and steering based on QoS with
1501 	 * equal priority.
1502 	 */
1503 	switch (config->rx_ring_num) {
1504 	case 1:
1505 		val64 = 0x0;
1506 		writeq(val64, &bar0->rx_w_round_robin_0);
1507 		writeq(val64, &bar0->rx_w_round_robin_1);
1508 		writeq(val64, &bar0->rx_w_round_robin_2);
1509 		writeq(val64, &bar0->rx_w_round_robin_3);
1510 		writeq(val64, &bar0->rx_w_round_robin_4);
1511 
1512 		val64 = 0x8080808080808080ULL;
1513 		writeq(val64, &bar0->rts_qos_steering);
1514 		break;
1515 	case 2:
1516 		val64 = 0x0001000100010001ULL;
1517 		writeq(val64, &bar0->rx_w_round_robin_0);
1518 		writeq(val64, &bar0->rx_w_round_robin_1);
1519 		writeq(val64, &bar0->rx_w_round_robin_2);
1520 		writeq(val64, &bar0->rx_w_round_robin_3);
1521 		val64 = 0x0001000100000000ULL;
1522 		writeq(val64, &bar0->rx_w_round_robin_4);
1523 
1524 		val64 = 0x8080808040404040ULL;
1525 		writeq(val64, &bar0->rts_qos_steering);
1526 		break;
1527 	case 3:
1528 		val64 = 0x0001020001020001ULL;
1529 		writeq(val64, &bar0->rx_w_round_robin_0);
1530 		val64 = 0x0200010200010200ULL;
1531 		writeq(val64, &bar0->rx_w_round_robin_1);
1532 		val64 = 0x0102000102000102ULL;
1533 		writeq(val64, &bar0->rx_w_round_robin_2);
1534 		val64 = 0x0001020001020001ULL;
1535 		writeq(val64, &bar0->rx_w_round_robin_3);
1536 		val64 = 0x0200010200000000ULL;
1537 		writeq(val64, &bar0->rx_w_round_robin_4);
1538 
1539 		val64 = 0x8080804040402020ULL;
1540 		writeq(val64, &bar0->rts_qos_steering);
1541 		break;
1542 	case 4:
1543 		val64 = 0x0001020300010203ULL;
1544 		writeq(val64, &bar0->rx_w_round_robin_0);
1545 		writeq(val64, &bar0->rx_w_round_robin_1);
1546 		writeq(val64, &bar0->rx_w_round_robin_2);
1547 		writeq(val64, &bar0->rx_w_round_robin_3);
1548 		val64 = 0x0001020300000000ULL;
1549 		writeq(val64, &bar0->rx_w_round_robin_4);
1550 
1551 		val64 = 0x8080404020201010ULL;
1552 		writeq(val64, &bar0->rts_qos_steering);
1553 		break;
1554 	case 5:
1555 		val64 = 0x0001020304000102ULL;
1556 		writeq(val64, &bar0->rx_w_round_robin_0);
1557 		val64 = 0x0304000102030400ULL;
1558 		writeq(val64, &bar0->rx_w_round_robin_1);
1559 		val64 = 0x0102030400010203ULL;
1560 		writeq(val64, &bar0->rx_w_round_robin_2);
1561 		val64 = 0x0400010203040001ULL;
1562 		writeq(val64, &bar0->rx_w_round_robin_3);
1563 		val64 = 0x0203040000000000ULL;
1564 		writeq(val64, &bar0->rx_w_round_robin_4);
1565 
1566 		val64 = 0x8080404020201008ULL;
1567 		writeq(val64, &bar0->rts_qos_steering);
1568 		break;
1569 	case 6:
1570 		val64 = 0x0001020304050001ULL;
1571 		writeq(val64, &bar0->rx_w_round_robin_0);
1572 		val64 = 0x0203040500010203ULL;
1573 		writeq(val64, &bar0->rx_w_round_robin_1);
1574 		val64 = 0x0405000102030405ULL;
1575 		writeq(val64, &bar0->rx_w_round_robin_2);
1576 		val64 = 0x0001020304050001ULL;
1577 		writeq(val64, &bar0->rx_w_round_robin_3);
1578 		val64 = 0x0203040500000000ULL;
1579 		writeq(val64, &bar0->rx_w_round_robin_4);
1580 
1581 		val64 = 0x8080404020100804ULL;
1582 		writeq(val64, &bar0->rts_qos_steering);
1583 		break;
1584 	case 7:
1585 		val64 = 0x0001020304050600ULL;
1586 		writeq(val64, &bar0->rx_w_round_robin_0);
1587 		val64 = 0x0102030405060001ULL;
1588 		writeq(val64, &bar0->rx_w_round_robin_1);
1589 		val64 = 0x0203040506000102ULL;
1590 		writeq(val64, &bar0->rx_w_round_robin_2);
1591 		val64 = 0x0304050600010203ULL;
1592 		writeq(val64, &bar0->rx_w_round_robin_3);
1593 		val64 = 0x0405060000000000ULL;
1594 		writeq(val64, &bar0->rx_w_round_robin_4);
1595 
1596 		val64 = 0x8080402010080402ULL;
1597 		writeq(val64, &bar0->rts_qos_steering);
1598 		break;
1599 	case 8:
1600 		val64 = 0x0001020304050607ULL;
1601 		writeq(val64, &bar0->rx_w_round_robin_0);
1602 		writeq(val64, &bar0->rx_w_round_robin_1);
1603 		writeq(val64, &bar0->rx_w_round_robin_2);
1604 		writeq(val64, &bar0->rx_w_round_robin_3);
1605 		val64 = 0x0001020300000000ULL;
1606 		writeq(val64, &bar0->rx_w_round_robin_4);
1607 
1608 		val64 = 0x8040201008040201ULL;
1609 		writeq(val64, &bar0->rts_qos_steering);
1610 		break;
1611 	}
1612 
1613 	/* UDP Fix */
1614 	val64 = 0;
1615 	for (i = 0; i < 8; i++)
1616 		writeq(val64, &bar0->rts_frm_len_n[i]);
1617 
1618 	/* Set the default rts frame length for the rings configured */
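	/* The extra 22 bytes presumably cover the 14-byte Ethernet header,
	 * the 4-byte VLAN tag and the 4-byte FCS (an assumption, not taken
	 * from the hardware spec).
	 */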
1619 	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1620 	for (i = 0 ; i < config->rx_ring_num ; i++)
1621 		writeq(val64, &bar0->rts_frm_len_n[i]);
1622 
1623 	/* Set the frame length for the configured rings
1624 	 * desired by the user
1625 	 */
1626 	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that the user
		 * has not specified frame-length steering.
		 * If the user provides a frame length then program
		 * the rts_frm_len register with that value; otherwise
		 * leave it as it is.
		 */
1633 		if (rts_frm_len[i] != 0) {
1634 			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1635 			       &bar0->rts_frm_len_n[i]);
1636 		}
1637 	}
1638 
1639 	/* Disable differentiated services steering logic */
1640 	for (i = 0; i < 64; i++) {
1641 		if (rts_ds_steer(nic, i, 0) == FAILURE) {
1642 			DBG_PRINT(ERR_DBG,
1643 				  "%s: rts_ds_steer failed on codepoint %d\n",
1644 				  dev->name, i);
1645 			return -ENODEV;
1646 		}
1647 	}
1648 
1649 	/* Program statistics memory */
1650 	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1651 
1652 	if (nic->device_type == XFRAME_II_DEVICE) {
1653 		val64 = STAT_BC(0x320);
1654 		writeq(val64, &bar0->stat_byte_cnt);
1655 	}
1656 
1657 	/*
1658 	 * Initializing the sampling rate for the device to calculate the
1659 	 * bandwidth utilization.
1660 	 */
1661 	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1662 		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1663 	writeq(val64, &bar0->mac_link_util);
1664 
1665 	/*
1666 	 * Initializing the Transmit and Receive Traffic Interrupt
1667 	 * Scheme.
1668 	 */
1669 
1670 	/* Initialize TTI */
1671 	if (SUCCESS != init_tti(nic, nic->last_link_state))
1672 		return -ENODEV;
1673 
1674 	/* RTI Initialization */
1675 	if (nic->device_type == XFRAME_II_DEVICE) {
1676 		/*
1677 		 * Programmed to generate Apprx 500 Intrs per
1678 		 * second
1679 		 */
1680 		int count = (nic->config.bus_speed * 125)/4;
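		/*
		 * Worked example (assumed bus speed): at 266 MHz,
		 * count = (266 * 125) / 4 = 8312.
		 */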
1681 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1682 	} else
1683 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1684 	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1685 		RTI_DATA1_MEM_RX_URNG_B(0x10) |
1686 		RTI_DATA1_MEM_RX_URNG_C(0x30) |
1687 		RTI_DATA1_MEM_RX_TIMER_AC_EN;
1688 
1689 	writeq(val64, &bar0->rti_data1_mem);
1690 
1691 	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1692 		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1693 	if (nic->config.intr_type == MSI_X)
1694 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1695 			  RTI_DATA2_MEM_RX_UFC_D(0x40));
1696 	else
1697 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1698 			  RTI_DATA2_MEM_RX_UFC_D(0x80));
1699 	writeq(val64, &bar0->rti_data2_mem);
1700 
1701 	for (i = 0; i < config->rx_ring_num; i++) {
1702 		val64 = RTI_CMD_MEM_WE |
1703 			RTI_CMD_MEM_STROBE_NEW_CMD |
1704 			RTI_CMD_MEM_OFFSET(i);
1705 		writeq(val64, &bar0->rti_command_mem);
1706 
1707 		/*
1708 		 * Once the operation completes, the Strobe bit of the
1709 		 * command register will be reset. We poll for this
1710 		 * particular condition. We wait for a maximum of 500ms
1711 		 * for the operation to complete, if it's not complete
1712 		 * by then we return error.
1713 		 */
1714 		time = 0;
1715 		while (true) {
1716 			val64 = readq(&bar0->rti_command_mem);
1717 			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1718 				break;
1719 
1720 			if (time > 10) {
1721 				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1722 					  dev->name);
1723 				return -ENODEV;
1724 			}
1725 			time++;
1726 			msleep(50);
1727 		}
1728 	}
1729 
	/*
	 * Initialize the pause thresholds for all eight Rx queues to
	 * default values; they are reprogrammed with the configured
	 * thresholds further down.
	 */
1734 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1735 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1736 
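	/*
	 * mac_cfg is key-protected: each 32-bit half-write must be preceded
	 * by writing the unlock key to rmac_cfg_key. (Xframe II can write
	 * the full register directly, as the FCS block below shows.)
	 */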
1737 	/* Disable RMAC PAD STRIPPING */
1738 	add = &bar0->mac_cfg;
1739 	val64 = readq(&bar0->mac_cfg);
1740 	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1741 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1742 	writel((u32) (val64), add);
1743 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1744 	writel((u32) (val64 >> 32), (add + 4));
1745 	val64 = readq(&bar0->mac_cfg);
1746 
1747 	/* Enable FCS stripping by adapter */
1748 	add = &bar0->mac_cfg;
1749 	val64 = readq(&bar0->mac_cfg);
1750 	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE) {
		writeq(val64, &bar0->mac_cfg);
	} else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}
1759 
	/*
	 * Set the time value to be inserted in the pause frames
	 * generated by Xena.
	 */
1764 	val64 = readq(&bar0->rmac_pause_cfg);
1765 	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1766 	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1767 	writeq(val64, &bar0->rmac_pause_cfg);
1768 
	/*
	 * Set the threshold for generating pause frames: if the amount
	 * of data in any queue exceeds the fraction
	 * (mc_pause_threshold_q0q3 or q4q7)/256 of the queue size,
	 * a pause frame is generated.
	 */
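	/*
	 * Each queue's threshold occupies a 16-bit field: high byte fixed
	 * at 0xFF, low byte carrying the configured value. The loops below
	 * replicate that field for the four queues packed into each 64-bit
	 * register.
	 */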
1775 	val64 = 0;
1776 	for (i = 0; i < 4; i++) {
1777 		val64 |= (((u64)0xFF00 |
1778 			   nic->mac_control.mc_pause_threshold_q0q3)
1779 			  << (i * 2 * 8));
1780 	}
1781 	writeq(val64, &bar0->mc_pause_thresh_q0q3);
1782 
1783 	val64 = 0;
1784 	for (i = 0; i < 4; i++) {
1785 		val64 |= (((u64)0xFF00 |
1786 			   nic->mac_control.mc_pause_threshold_q4q7)
1787 			  << (i * 2 * 8));
1788 	}
1789 	writeq(val64, &bar0->mc_pause_thresh_q4q7);
1790 
	/*
	 * TxDMA will stop issuing read requests if the number of read
	 * splits exceeds the limit set by shared_splits.
	 */
1795 	val64 = readq(&bar0->pic_control);
1796 	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1797 	writeq(val64, &bar0->pic_control);
1798 
1799 	if (nic->config.bus_speed == 266) {
1800 		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1801 		writeq(0x0, &bar0->read_retry_delay);
1802 		writeq(0x0, &bar0->write_retry_delay);
1803 	}
1804 
1805 	/*
1806 	 * Programming the Herc to split every write transaction
1807 	 * that does not start on an ADB to reduce disconnects.
1808 	 */
1809 	if (nic->device_type == XFRAME_II_DEVICE) {
1810 		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1811 			MISC_LINK_STABILITY_PRD(3);
1812 		writeq(val64, &bar0->misc_control);
1813 		val64 = readq(&bar0->pic_control2);
1814 		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1815 		writeq(val64, &bar0->pic_control2);
1816 	}
1817 	if (strstr(nic->product_name, "CX4")) {
1818 		val64 = TMAC_AVG_IPG(0x17);
1819 		writeq(val64, &bar0->tmac_avg_ipg);
1820 	}
1821 
1822 	return SUCCESS;
}

1824 #define LINK_UP_DOWN_INTERRUPT		1
1825 #define MAC_RMAC_ERR_TIMER		2
1826 
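/*
 * s2io_link_fault_indication - select the link fault detection scheme.
 * Xframe II reports link state changes through the GPIO link up/down
 * interrupt, while older adapters rely on the RMAC error timer instead.
 */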
1827 static int s2io_link_fault_indication(struct s2io_nic *nic)
1828 {
1829 	if (nic->device_type == XFRAME_II_DEVICE)
1830 		return LINK_UP_DOWN_INTERRUPT;
1831 	else
1832 		return MAC_RMAC_ERR_TIMER;
1833 }
1834 
/**
 *  do_s2io_write_bits - update mask bits in an alarm/mask register
 *  @value: bits to update
 *  @flag: ENABLE_INTRS to clear the bits (unmasking the alarms),
 *         anything else to set them (masking the alarms)
 *  @addr: address of the alarm/mask register
 *  Description: read-modify-write of the given mask register.
 *  Return Value:
 *  NONE.
 */
1844 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1845 {
1846 	u64 temp64;
1847 
1848 	temp64 = readq(addr);
1849 
1850 	if (flag == ENABLE_INTRS)
1851 		temp64 &= ~((u64)value);
1852 	else
1853 		temp64 |= ((u64)value);
1854 	writeq(temp64, addr);
1855 }
1856 
1857 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1858 {
1859 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1860 	register u64 gen_int_mask = 0;
1861 	u64 interruptible;
1862 
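	/* Mask everything at the top level while the per-block masks are updated */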
1863 	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1864 	if (mask & TX_DMA_INTR) {
1865 		gen_int_mask |= TXDMA_INT_M;
1866 
1867 		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1868 				   TXDMA_PCC_INT | TXDMA_TTI_INT |
1869 				   TXDMA_LSO_INT | TXDMA_TPA_INT |
1870 				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1871 
1872 		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1873 				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1874 				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1875 				   &bar0->pfc_err_mask);
1876 
1877 		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1878 				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1879 				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1880 
1881 		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1882 				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1883 				   PCC_N_SERR | PCC_6_COF_OV_ERR |
1884 				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1885 				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1886 				   PCC_TXB_ECC_SG_ERR,
1887 				   flag, &bar0->pcc_err_mask);
1888 
1889 		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1890 				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1891 
1892 		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1893 				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1894 				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1895 				   flag, &bar0->lso_err_mask);
1896 
1897 		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1898 				   flag, &bar0->tpa_err_mask);
1899 
1900 		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1901 	}
1902 
1903 	if (mask & TX_MAC_INTR) {
1904 		gen_int_mask |= TXMAC_INT_M;
1905 		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1906 				   &bar0->mac_int_mask);
1907 		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1908 				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1909 				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1910 				   flag, &bar0->mac_tmac_err_mask);
1911 	}
1912 
1913 	if (mask & TX_XGXS_INTR) {
1914 		gen_int_mask |= TXXGXS_INT_M;
1915 		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1916 				   &bar0->xgxs_int_mask);
1917 		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1918 				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1919 				   flag, &bar0->xgxs_txgxs_err_mask);
1920 	}
1921 
1922 	if (mask & RX_DMA_INTR) {
1923 		gen_int_mask |= RXDMA_INT_M;
1924 		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1925 				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1926 				   flag, &bar0->rxdma_int_mask);
1927 		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1928 				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1929 				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1930 				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1931 		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1932 				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1933 				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1934 				   &bar0->prc_pcix_err_mask);
1935 		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1936 				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1937 				   &bar0->rpa_err_mask);
1938 		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1939 				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1940 				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1941 				   RDA_FRM_ECC_SG_ERR |
1942 				   RDA_MISC_ERR|RDA_PCIX_ERR,
1943 				   flag, &bar0->rda_err_mask);
1944 		do_s2io_write_bits(RTI_SM_ERR_ALARM |
1945 				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1946 				   flag, &bar0->rti_err_mask);
1947 	}
1948 
1949 	if (mask & RX_MAC_INTR) {
1950 		gen_int_mask |= RXMAC_INT_M;
1951 		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1952 				   &bar0->mac_int_mask);
1953 		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1954 				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1955 				 RMAC_DOUBLE_ECC_ERR);
1956 		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
1957 			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
1958 		do_s2io_write_bits(interruptible,
1959 				   flag, &bar0->mac_rmac_err_mask);
1960 	}
1961 
1962 	if (mask & RX_XGXS_INTR) {
1963 		gen_int_mask |= RXXGXS_INT_M;
1964 		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1965 				   &bar0->xgxs_int_mask);
1966 		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1967 				   &bar0->xgxs_rxgxs_err_mask);
1968 	}
1969 
1970 	if (mask & MC_INTR) {
1971 		gen_int_mask |= MC_INT_M;
1972 		do_s2io_write_bits(MC_INT_MASK_MC_INT,
1973 				   flag, &bar0->mc_int_mask);
1974 		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1975 				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1976 				   &bar0->mc_err_mask);
1977 	}
1978 	nic->general_int_mask = gen_int_mask;
1979 
1980 	/* Remove this line when alarm interrupts are enabled */
1981 	nic->general_int_mask = 0;
1982 }
1983 
1984 /**
1985  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
 *  @nic: device private variable
 *  @mask: A mask indicating which Intr block must be modified
 *  @flag: A flag indicating whether to enable or disable the Intrs.
1989  *  Description: This function will either disable or enable the interrupts
1990  *  depending on the flag argument. The mask argument can be used to
1991  *  enable/disable any Intr block.
1992  *  Return Value: NONE.
1993  */
1994 
1995 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1996 {
1997 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1998 	register u64 temp64 = 0, intr_mask = 0;
1999 
2000 	intr_mask = nic->general_int_mask;
2001 
2002 	/*  Top level interrupt classification */
2003 	/*  PIC Interrupts */
2004 	if (mask & TX_PIC_INTR) {
2005 		/*  Enable PIC Intrs in the general intr mask register */
2006 		intr_mask |= TXPIC_INT_M;
2007 		if (flag == ENABLE_INTRS) {
			/*
			 * On a Hercules adapter, enable the GPIO (link
			 * up/down) interrupt; otherwise disable all PCI-X,
			 * Flash, MDIO, IIC and GPIO interrupts for now.
			 * TODO
			 */
2014 			if (s2io_link_fault_indication(nic) ==
2015 			    LINK_UP_DOWN_INTERRUPT) {
2016 				do_s2io_write_bits(PIC_INT_GPIO, flag,
2017 						   &bar0->pic_int_mask);
2018 				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2019 						   &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
2022 		} else if (flag == DISABLE_INTRS) {
2023 			/*
2024 			 * Disable PIC Intrs in the general
2025 			 * intr mask register
2026 			 */
2027 			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2028 		}
2029 	}
2030 
2031 	/*  Tx traffic interrupts */
2032 	if (mask & TX_TRAFFIC_INTR) {
2033 		intr_mask |= TXTRAFFIC_INT_M;
2034 		if (flag == ENABLE_INTRS) {
2035 			/*
2036 			 * Enable all the Tx side interrupts
2037 			 * writing 0 Enables all 64 TX interrupt levels
2038 			 */
2039 			writeq(0x0, &bar0->tx_traffic_mask);
2040 		} else if (flag == DISABLE_INTRS) {
2041 			/*
2042 			 * Disable Tx Traffic Intrs in the general intr mask
2043 			 * register.
2044 			 */
2045 			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2046 		}
2047 	}
2048 
2049 	/*  Rx traffic interrupts */
2050 	if (mask & RX_TRAFFIC_INTR) {
2051 		intr_mask |= RXTRAFFIC_INT_M;
2052 		if (flag == ENABLE_INTRS) {
2053 			/* writing 0 Enables all 8 RX interrupt levels */
2054 			writeq(0x0, &bar0->rx_traffic_mask);
2055 		} else if (flag == DISABLE_INTRS) {
2056 			/*
2057 			 * Disable Rx Traffic Intrs in the general intr mask
2058 			 * register.
2059 			 */
2060 			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2061 		}
2062 	}
2063 
2064 	temp64 = readq(&bar0->general_int_mask);
2065 	if (flag == ENABLE_INTRS)
2066 		temp64 &= ~((u64)intr_mask);
2067 	else
2068 		temp64 = DISABLE_ALL_INTRS;
2069 	writeq(temp64, &bar0->general_int_mask);
2070 
2071 	nic->general_int_mask = readq(&bar0->general_int_mask);
2072 }
2073 
/**
 *  verify_pcc_quiescent - Checks for PCC quiescent state
 *  @sp: device private structure
 *  @flag: expected state; when true the PCC must be idle, when false
 *         it must be active
 *  Return: 1 if the PCC state matches @flag
 *          0 if it does not
 */
2079 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2080 {
2081 	int ret = 0, herc;
2082 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2083 	u64 val64 = readq(&bar0->adapter_status);
2084 
2085 	herc = (sp->device_type == XFRAME_II_DEVICE);
2086 
2087 	if (flag == false) {
2088 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2089 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2090 				ret = 1;
2091 		} else {
2092 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2093 				ret = 1;
2094 		}
2095 	} else {
2096 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2097 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2098 			     ADAPTER_STATUS_RMAC_PCC_IDLE))
2099 				ret = 1;
2100 		} else {
2101 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2102 			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2103 				ret = 1;
2104 		}
2105 	}
2106 
2107 	return ret;
2108 }

/**
 *  verify_xena_quiescence - Checks whether the H/W is ready
 *  @sp: device private structure
 *  Description: Returns whether the H/W is ready to go or not, by
 *  checking the readiness and quiescence bits in the adapter status
 *  register.
 *  Return: 1 if Xena is quiescent
 *          0 if Xena is not quiescent
 */
2118 
2119 static int verify_xena_quiescence(struct s2io_nic *sp)
2120 {
2121 	int  mode;
2122 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2123 	u64 val64 = readq(&bar0->adapter_status);
2124 	mode = s2io_verify_pci_mode(sp);
2125 
2126 	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2127 		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2128 		return 0;
2129 	}
2130 	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2131 		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2132 		return 0;
2133 	}
2134 	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2135 		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2136 		return 0;
2137 	}
2138 	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2139 		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2140 		return 0;
2141 	}
2142 	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2143 		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2144 		return 0;
2145 	}
2146 	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2147 		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2148 		return 0;
2149 	}
2150 	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2151 		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2152 		return 0;
2153 	}
2154 	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2155 		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2156 		return 0;
2157 	}
2158 
	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
2164 	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2165 	    sp->device_type == XFRAME_II_DEVICE &&
2166 	    mode != PCI_MODE_PCI_33) {
2167 		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2168 		return 0;
2169 	}
2170 	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2171 	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2172 		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2173 		return 0;
2174 	}
2175 	return 1;
2176 }
2177 
/**
 * fix_mac_address - Fix for MAC address problem on Alpha platforms
 * @sp: Pointer to device specific structure
 * Description:
 * New procedure to clear MAC address reading problems on Alpha platforms.
 */
2185 
2186 static void fix_mac_address(struct s2io_nic *sp)
2187 {
2188 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2189 	int i = 0;
2190 
2191 	while (fix_mac[i] != END_SIGN) {
2192 		writeq(fix_mac[i++], &bar0->gpio_control);
2193 		udelay(10);
2194 		(void) readq(&bar0->gpio_control);
2195 	}
2196 }
2197 
2198 /**
2199  *  start_nic - Turns the device on
2200  *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this function is
 *  called, all registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and FAILURE on failure.
2209  */
2210 
2211 static int start_nic(struct s2io_nic *nic)
2212 {
2213 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2214 	struct net_device *dev = nic->dev;
2215 	register u64 val64 = 0;
2216 	u16 subid, i;
2217 	struct config_param *config = &nic->config;
2218 	struct mac_info *mac_control = &nic->mac_control;
2219 
2220 	/*  PRC Initialization and configuration */
2221 	for (i = 0; i < config->rx_ring_num; i++) {
2222 		struct ring_info *ring = &mac_control->rings[i];
2223 
2224 		writeq((u64)ring->rx_blocks[0].block_dma_addr,
2225 		       &bar0->prc_rxd0_n[i]);
2226 
2227 		val64 = readq(&bar0->prc_ctrl_n[i]);
2228 		if (nic->rxd_mode == RXD_MODE_1)
2229 			val64 |= PRC_CTRL_RC_ENABLED;
2230 		else
2231 			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2232 		if (nic->device_type == XFRAME_II_DEVICE)
2233 			val64 |= PRC_CTRL_GROUP_READS;
2234 		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2235 		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2236 		writeq(val64, &bar0->prc_ctrl_n[i]);
2237 	}
2238 
2239 	if (nic->rxd_mode == RXD_MODE_3B) {
2240 		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2241 		val64 = readq(&bar0->rx_pa_cfg);
2242 		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2243 		writeq(val64, &bar0->rx_pa_cfg);
2244 	}
2245 
2246 	if (vlan_tag_strip == 0) {
2247 		val64 = readq(&bar0->rx_pa_cfg);
2248 		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2249 		writeq(val64, &bar0->rx_pa_cfg);
2250 		nic->vlan_strip_flag = 0;
2251 	}
2252 
	/*
	 * Enabling MC-RLDRAM. After enabling the device, we wait
	 * around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
2258 	val64 = readq(&bar0->mc_rldram_mrs);
2259 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2260 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2261 	val64 = readq(&bar0->mc_rldram_mrs);
2262 
2263 	msleep(100);	/* Delay by around 100 ms. */
2264 
2265 	/* Enabling ECC Protection. */
2266 	val64 = readq(&bar0->adapter_control);
2267 	val64 &= ~ADAPTER_ECC_EN;
2268 	writeq(val64, &bar0->adapter_control);
2269 
2270 	/*
2271 	 * Verify if the device is ready to be enabled, if so enable
2272 	 * it.
2273 	 */
2274 	val64 = readq(&bar0->adapter_status);
2275 	if (!verify_xena_quiescence(nic)) {
2276 		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2277 			  "Adapter status reads: 0x%llx\n",
2278 			  dev->name, (unsigned long long)val64);
2279 		return FAILURE;
2280 	}
2281 
2282 	/*
2283 	 * With some switches, link might be already up at this point.
2284 	 * Because of this weird behavior, when we enable laser,
2285 	 * we may not get link. We need to handle this. We cannot
2286 	 * figure out which switch is misbehaving. So we are forced to
2287 	 * make a global change.
2288 	 */
2289 
2290 	/* Enabling Laser. */
2291 	val64 = readq(&bar0->adapter_control);
2292 	val64 |= ADAPTER_EOI_TX_ON;
2293 	writeq(val64, &bar0->adapter_control);
2294 
2295 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * We don't see link state interrupts initially on some
		 * switches, so directly schedule the link state task here.
		 */
2300 		schedule_work(&nic->set_link_task);
2301 	}
2302 	/* SXE-002: Initialize link and activity LED */
2303 	subid = nic->pdev->subsystem_device;
2304 	if (((subid & 0xFF) >= 0x07) &&
2305 	    (nic->device_type == XFRAME_I_DEVICE)) {
2306 		val64 = readq(&bar0->gpio_control);
2307 		val64 |= 0x0000800000000000ULL;
2308 		writeq(val64, &bar0->gpio_control);
2309 		val64 = 0x0411040400000000ULL;
2310 		writeq(val64, (void __iomem *)bar0 + 0x2700);
2311 	}
2312 
2313 	return SUCCESS;
}

/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo to which the Tx descriptor list belongs
 * @txdlp: pointer to the first TxD in the list
 * @get_off: current get offset within the fifo (currently unused)
 */
2318 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2319 					struct TxD *txdlp, int get_off)
2320 {
2321 	struct s2io_nic *nic = fifo_data->nic;
2322 	struct sk_buff *skb;
2323 	struct TxD *txds;
2324 	u16 j, frg_cnt;
2325 
2326 	txds = txdlp;
2327 	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2328 		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2329 				 sizeof(u64), PCI_DMA_TODEVICE);
2330 		txds++;
2331 	}
2332 
2333 	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2334 	if (!skb) {
2335 		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2336 		return NULL;
2337 	}
2338 	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2339 			 skb_headlen(skb), PCI_DMA_TODEVICE);
2340 	frg_cnt = skb_shinfo(skb)->nr_frags;
2341 	if (frg_cnt) {
2342 		txds++;
2343 		for (j = 0; j < frg_cnt; j++, txds++) {
2344 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2345 			if (!txds->Buffer_Pointer)
2346 				break;
2347 			pci_unmap_page(nic->pdev,
2348 				       (dma_addr_t)txds->Buffer_Pointer,
2349 				       skb_frag_size(frag), PCI_DMA_TODEVICE);
2350 		}
2351 	}
2352 	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2353 	return skb;
2354 }
2355 
2356 /**
2357  *  free_tx_buffers - Free all queued Tx buffers
2358  *  @nic : device private variable.
2359  *  Description:
2360  *  Free all queued Tx buffers.
2361  *  Return Value: void
2362  */
2363 
2364 static void free_tx_buffers(struct s2io_nic *nic)
2365 {
2366 	struct net_device *dev = nic->dev;
2367 	struct sk_buff *skb;
2368 	struct TxD *txdp;
2369 	int i, j;
2370 	int cnt = 0;
2371 	struct config_param *config = &nic->config;
2372 	struct mac_info *mac_control = &nic->mac_control;
2373 	struct stat_block *stats = mac_control->stats_info;
2374 	struct swStat *swstats = &stats->sw_stat;
2375 
2376 	for (i = 0; i < config->tx_fifo_num; i++) {
2377 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2378 		struct fifo_info *fifo = &mac_control->fifos[i];
2379 		unsigned long flags;
2380 
2381 		spin_lock_irqsave(&fifo->tx_lock, flags);
2382 		for (j = 0; j < tx_cfg->fifo_len; j++) {
2383 			txdp = fifo->list_info[j].list_virt_addr;
2384 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2385 			if (skb) {
2386 				swstats->mem_freed += skb->truesize;
2387 				dev_kfree_skb(skb);
2388 				cnt++;
2389 			}
2390 		}
2391 		DBG_PRINT(INTR_DBG,
2392 			  "%s: forcibly freeing %d skbs on FIFO%d\n",
2393 			  dev->name, cnt, i);
2394 		fifo->tx_curr_get_info.offset = 0;
2395 		fifo->tx_curr_put_info.offset = 0;
2396 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
2397 	}
2398 }
2399 
2400 /**
2401  *   stop_nic -  To stop the nic
 *   @nic : device private variable.
2403  *   Description:
2404  *   This function does exactly the opposite of what the start_nic()
2405  *   function does. This function is called to stop the device.
2406  *   Return Value:
2407  *   void.
2408  */
2409 
2410 static void stop_nic(struct s2io_nic *nic)
2411 {
2412 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2413 	register u64 val64 = 0;
2414 	u16 interruptible;
2415 
2416 	/*  Disable all interrupts */
2417 	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2418 	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2419 	interruptible |= TX_PIC_INTR;
2420 	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2421 
2422 	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2423 	val64 = readq(&bar0->adapter_control);
2424 	val64 &= ~(ADAPTER_CNTL_EN);
2425 	writeq(val64, &bar0->adapter_control);
2426 }
2427 
2428 /**
2429  *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic: device private variable
 *  @ring: per ring structure
 *  @from_card_up: If this is true, we will map the buffer to get
 *     the dma address for buf0 and buf1 to give it to the card.
 *     Else we will sync the already mapped buffer to give it to the card.
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 header and
 *  L4 payload in three buffer mode; in five buffer mode the L4 payload
 *  itself is split into 3 fragments. The driver currently uses the
 *  single buffer (RXD_MODE_1) and two buffer (RXD_MODE_3B) modes.
2447  *   Return Value:
2448  *  SUCCESS on success or an appropriate -ve value on failure.
2449  */
2450 static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2451 			   int from_card_up)
2452 {
2453 	struct sk_buff *skb;
2454 	struct RxD_t *rxdp;
2455 	int off, size, block_no, block_no1;
2456 	u32 alloc_tab = 0;
2457 	u32 alloc_cnt;
2458 	u64 tmp;
2459 	struct buffAdd *ba;
2460 	struct RxD_t *first_rxdp = NULL;
2461 	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2462 	int rxd_index = 0;
2463 	struct RxD1 *rxdp1;
2464 	struct RxD3 *rxdp3;
2465 	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2466 
2467 	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2468 
2469 	block_no1 = ring->rx_curr_get_info.block_index;
2470 	while (alloc_tab < alloc_cnt) {
2471 		block_no = ring->rx_curr_put_info.block_index;
2472 
2473 		off = ring->rx_curr_put_info.offset;
2474 
2475 		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2476 
2477 		rxd_index = off + 1;
2478 		if (block_no)
2479 			rxd_index += (block_no * ring->rxd_count);
2480 
2481 		if ((block_no == block_no1) &&
2482 		    (off == ring->rx_curr_get_info.offset) &&
2483 		    (rxdp->Host_Control)) {
2484 			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2485 				  ring->dev->name);
2486 			goto end;
2487 		}
2488 		if (off && (off == ring->rxd_count)) {
2489 			ring->rx_curr_put_info.block_index++;
2490 			if (ring->rx_curr_put_info.block_index ==
2491 			    ring->block_count)
2492 				ring->rx_curr_put_info.block_index = 0;
2493 			block_no = ring->rx_curr_put_info.block_index;
2494 			off = 0;
2495 			ring->rx_curr_put_info.offset = off;
2496 			rxdp = ring->rx_blocks[block_no].block_virt_addr;
2497 			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2498 				  ring->dev->name, rxdp);
2499 
2500 		}
2501 
2502 		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2503 		    ((ring->rxd_mode == RXD_MODE_3B) &&
2504 		     (rxdp->Control_2 & s2BIT(0)))) {
2505 			ring->rx_curr_put_info.offset = off;
2506 			goto end;
2507 		}
2508 		/* calculate size of skb based on ring mode */
2509 		size = ring->mtu +
2510 			HEADER_ETHERNET_II_802_3_SIZE +
2511 			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2512 		if (ring->rxd_mode == RXD_MODE_1)
2513 			size += NET_IP_ALIGN;
2514 		else
2515 			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2516 
2517 		/* allocate skb */
2518 		skb = netdev_alloc_skb(nic->dev, size);
2519 		if (!skb) {
2520 			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2521 				  ring->dev->name);
2522 			if (first_rxdp) {
2523 				dma_wmb();
2524 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2525 			}
2526 			swstats->mem_alloc_fail_cnt++;
2527 
			return -ENOMEM;
2529 		}
2530 		swstats->mem_allocated += skb->truesize;
2531 
2532 		if (ring->rxd_mode == RXD_MODE_1) {
2533 			/* 1 buffer mode - normal operation mode */
2534 			rxdp1 = (struct RxD1 *)rxdp;
2535 			memset(rxdp, 0, sizeof(struct RxD1));
2536 			skb_reserve(skb, NET_IP_ALIGN);
2537 			rxdp1->Buffer0_ptr =
2538 				pci_map_single(ring->pdev, skb->data,
2539 					       size - NET_IP_ALIGN,
2540 					       PCI_DMA_FROMDEVICE);
2541 			if (pci_dma_mapping_error(nic->pdev,
2542 						  rxdp1->Buffer0_ptr))
2543 				goto pci_map_failed;
2544 
2545 			rxdp->Control_2 =
2546 				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2547 			rxdp->Host_Control = (unsigned long)skb;
2548 		} else if (ring->rxd_mode == RXD_MODE_3B) {
2549 			/*
2550 			 * 2 buffer mode -
2551 			 * 2 buffer mode provides 128
2552 			 * byte aligned receive buffers.
2553 			 */
2554 
2555 			rxdp3 = (struct RxD3 *)rxdp;
2556 			/* save buffer pointers to avoid frequent dma mapping */
2557 			Buffer0_ptr = rxdp3->Buffer0_ptr;
2558 			Buffer1_ptr = rxdp3->Buffer1_ptr;
2559 			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync */
2561 			rxdp3->Buffer0_ptr = Buffer0_ptr;
2562 			rxdp3->Buffer1_ptr = Buffer1_ptr;
2563 
2564 			ba = &ring->ba[block_no][off];
2565 			skb_reserve(skb, BUF0_LEN);
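			/*
			 * Round skb->data up to the next (ALIGN_SIZE + 1)-byte
			 * boundary; ALIGN_SIZE is assumed to be of the form
			 * 2^n - 1, so the mask below clears the low bits.
			 */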
2566 			tmp = (u64)(unsigned long)skb->data;
2567 			tmp += ALIGN_SIZE;
2568 			tmp &= ~ALIGN_SIZE;
			skb->data = (void *)(unsigned long)tmp;
2570 			skb_reset_tail_pointer(skb);
2571 
2572 			if (from_card_up) {
2573 				rxdp3->Buffer0_ptr =
2574 					pci_map_single(ring->pdev, ba->ba_0,
2575 						       BUF0_LEN,
2576 						       PCI_DMA_FROMDEVICE);
2577 				if (pci_dma_mapping_error(nic->pdev,
2578 							  rxdp3->Buffer0_ptr))
2579 					goto pci_map_failed;
			} else {
				pci_dma_sync_single_for_device(ring->pdev,
							       (dma_addr_t)rxdp3->Buffer0_ptr,
							       BUF0_LEN,
							       PCI_DMA_FROMDEVICE);
			}
2585 
2586 			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2587 			if (ring->rxd_mode == RXD_MODE_3B) {
2588 				/* Two buffer mode */
2589 
2590 				/*
2591 				 * Buffer2 will have L3/L4 header plus
2592 				 * L4 payload
2593 				 */
2594 				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
2595 								    skb->data,
2596 								    ring->mtu + 4,
2597 								    PCI_DMA_FROMDEVICE);
2598 
2599 				if (pci_dma_mapping_error(nic->pdev,
2600 							  rxdp3->Buffer2_ptr))
2601 					goto pci_map_failed;
2602 
2603 				if (from_card_up) {
2604 					rxdp3->Buffer1_ptr =
2605 						pci_map_single(ring->pdev,
2606 							       ba->ba_1,
2607 							       BUF1_LEN,
2608 							       PCI_DMA_FROMDEVICE);
2609 
2610 					if (pci_dma_mapping_error(nic->pdev,
2611 								  rxdp3->Buffer1_ptr)) {
2612 						pci_unmap_single(ring->pdev,
2613 								 (dma_addr_t)(unsigned long)
2614 								 skb->data,
2615 								 ring->mtu + 4,
2616 								 PCI_DMA_FROMDEVICE);
2617 						goto pci_map_failed;
2618 					}
2619 				}
2620 				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2621 				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2622 					(ring->mtu + 4);
2623 			}
2624 			rxdp->Control_2 |= s2BIT(0);
2625 			rxdp->Host_Control = (unsigned long) (skb);
2626 		}
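		/*
		 * RxDs are handed to the NIC in batches of 2^rxsync_frequency:
		 * every descriptor except the first of a batch gets
		 * RXD_OWN_XENA immediately, while the first one (first_rxdp)
		 * is flipped last, after a dma_wmb(), so the adapter never
		 * sees a partially initialized batch.
		 */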
2627 		if (alloc_tab & ((1 << rxsync_frequency) - 1))
2628 			rxdp->Control_1 |= RXD_OWN_XENA;
2629 		off++;
2630 		if (off == (ring->rxd_count + 1))
2631 			off = 0;
2632 		ring->rx_curr_put_info.offset = off;
2633 
2634 		rxdp->Control_2 |= SET_RXD_MARKER;
2635 		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2636 			if (first_rxdp) {
2637 				dma_wmb();
2638 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2639 			}
2640 			first_rxdp = rxdp;
2641 		}
2642 		ring->rx_bufs_left += 1;
2643 		alloc_tab++;
2644 	}
2645 
2646 end:
2647 	/* Transfer ownership of first descriptor to adapter just before
2648 	 * exiting. Before that, use memory barrier so that ownership
2649 	 * and other fields are seen by adapter correctly.
2650 	 */
2651 	if (first_rxdp) {
2652 		dma_wmb();
2653 		first_rxdp->Control_1 |= RXD_OWN_XENA;
2654 	}
2655 
2656 	return SUCCESS;
2657 
2658 pci_map_failed:
2659 	swstats->pci_map_fail_cnt++;
2660 	swstats->mem_freed += skb->truesize;
2661 	dev_kfree_skb_irq(skb);
2662 	return -ENOMEM;
2663 }
2664 
2665 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2666 {
2667 	struct net_device *dev = sp->dev;
2668 	int j;
2669 	struct sk_buff *skb;
2670 	struct RxD_t *rxdp;
2671 	struct RxD1 *rxdp1;
2672 	struct RxD3 *rxdp3;
2673 	struct mac_info *mac_control = &sp->mac_control;
2674 	struct stat_block *stats = mac_control->stats_info;
2675 	struct swStat *swstats = &stats->sw_stat;
2676 
	for (j = 0; j < rxd_count[sp->rxd_mode]; j++) {
2678 		rxdp = mac_control->rings[ring_no].
2679 			rx_blocks[blk].rxds[j].virt_addr;
2680 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2681 		if (!skb)
2682 			continue;
2683 		if (sp->rxd_mode == RXD_MODE_1) {
2684 			rxdp1 = (struct RxD1 *)rxdp;
2685 			pci_unmap_single(sp->pdev,
2686 					 (dma_addr_t)rxdp1->Buffer0_ptr,
2687 					 dev->mtu +
2688 					 HEADER_ETHERNET_II_802_3_SIZE +
2689 					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2690 					 PCI_DMA_FROMDEVICE);
2691 			memset(rxdp, 0, sizeof(struct RxD1));
2692 		} else if (sp->rxd_mode == RXD_MODE_3B) {
2693 			rxdp3 = (struct RxD3 *)rxdp;
2694 			pci_unmap_single(sp->pdev,
2695 					 (dma_addr_t)rxdp3->Buffer0_ptr,
2696 					 BUF0_LEN,
2697 					 PCI_DMA_FROMDEVICE);
2698 			pci_unmap_single(sp->pdev,
2699 					 (dma_addr_t)rxdp3->Buffer1_ptr,
2700 					 BUF1_LEN,
2701 					 PCI_DMA_FROMDEVICE);
2702 			pci_unmap_single(sp->pdev,
2703 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2704 					 dev->mtu + 4,
2705 					 PCI_DMA_FROMDEVICE);
2706 			memset(rxdp, 0, sizeof(struct RxD3));
2707 		}
2708 		swstats->mem_freed += skb->truesize;
2709 		dev_kfree_skb(skb);
2710 		mac_control->rings[ring_no].rx_bufs_left -= 1;
2711 	}
2712 }
2713 
2714 /**
2715  *  free_rx_buffers - Frees all Rx buffers
2716  *  @sp: device private variable.
2717  *  Description:
2718  *  This function will free all Rx buffers allocated by host.
2719  *  Return Value:
2720  *  NONE.
2721  */
2722 
2723 static void free_rx_buffers(struct s2io_nic *sp)
2724 {
2725 	struct net_device *dev = sp->dev;
2726 	int i, blk = 0, buf_cnt = 0;
2727 	struct config_param *config = &sp->config;
2728 	struct mac_info *mac_control = &sp->mac_control;
2729 
2730 	for (i = 0; i < config->rx_ring_num; i++) {
2731 		struct ring_info *ring = &mac_control->rings[i];
2732 
2733 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2734 			free_rxd_blk(sp, i, blk);
2735 
2736 		ring->rx_curr_put_info.block_index = 0;
2737 		ring->rx_curr_get_info.block_index = 0;
2738 		ring->rx_curr_put_info.offset = 0;
2739 		ring->rx_curr_get_info.offset = 0;
2740 		ring->rx_bufs_left = 0;
2741 		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2742 			  dev->name, buf_cnt, i);
2743 	}
2744 }
2745 
2746 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2747 {
2748 	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2749 		DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2750 			  ring->dev->name);
2751 	}
2752 	return 0;
2753 }
2754 
2755 /**
 * s2io_poll_msix - Rx interrupt handler for NAPI support
 * @napi : pointer to the napi structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt
 * context; it will also process only the budgeted number of packets.
 * Return value:
 * The number of packets processed.
2766  */
2767 
2768 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2769 {
2770 	struct ring_info *ring = container_of(napi, struct ring_info, napi);
2771 	struct net_device *dev = ring->dev;
2772 	int pkts_processed = 0;
2773 	u8 __iomem *addr = NULL;
2774 	u8 val8 = 0;
2775 	struct s2io_nic *nic = netdev_priv(dev);
2776 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2777 	int budget_org = budget;
2778 
2779 	if (unlikely(!is_s2io_card_up(nic)))
2780 		return 0;
2781 
2782 	pkts_processed = rx_intr_handler(ring, budget);
2783 	s2io_chk_rx_buffers(nic, ring);
2784 
2785 	if (pkts_processed < budget_org) {
2786 		napi_complete(napi);
		/*
		 * Re-enable the MSI-X Rx vector for this ring; each ring
		 * has its own mask byte within xmsi_mask_reg.
		 */
2788 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2789 		addr += 7 - ring->ring_no;
2790 		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2791 		writeb(val8, addr);
2792 		val8 = readb(addr);
2793 	}
2794 	return pkts_processed;
2795 }
2796 
2797 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2798 {
2799 	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2800 	int pkts_processed = 0;
2801 	int ring_pkts_processed, i;
2802 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2803 	int budget_org = budget;
2804 	struct config_param *config = &nic->config;
2805 	struct mac_info *mac_control = &nic->mac_control;
2806 
2807 	if (unlikely(!is_s2io_card_up(nic)))
2808 		return 0;
2809 
2810 	for (i = 0; i < config->rx_ring_num; i++) {
2811 		struct ring_info *ring = &mac_control->rings[i];
2812 		ring_pkts_processed = rx_intr_handler(ring, budget);
2813 		s2io_chk_rx_buffers(nic, ring);
2814 		pkts_processed += ring_pkts_processed;
2815 		budget -= ring_pkts_processed;
2816 		if (budget <= 0)
2817 			break;
2818 	}
2819 	if (pkts_processed < budget_org) {
2820 		napi_complete(napi);
2821 		/* Re enable the Rx interrupts for the ring */
2822 		writeq(0, &bar0->rx_traffic_mask);
2823 		readl(&bar0->rx_traffic_mask);
2824 	}
2825 	return pkts_processed;
2826 }
2827 
2828 #ifdef CONFIG_NET_POLL_CONTROLLER
2829 /**
2830  * s2io_netpoll - netpoll event handler entry point
2831  * @dev : pointer to the device structure.
2832  * Description:
 *	This function will be called by the upper layer to check for events
 * on the interface in situations where interrupts are disabled. It is
 * used for specific in-kernel networking tasks, such as remote consoles
 * and kernel debugging over the network (for example, netdump in Red Hat).
2837  */
2838 static void s2io_netpoll(struct net_device *dev)
2839 {
2840 	struct s2io_nic *nic = netdev_priv(dev);
2841 	const int irq = nic->pdev->irq;
2842 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2843 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2844 	int i;
2845 	struct config_param *config = &nic->config;
2846 	struct mac_info *mac_control = &nic->mac_control;
2847 
2848 	if (pci_channel_offline(nic->pdev))
2849 		return;
2850 
2851 	disable_irq(irq);
2852 
2853 	writeq(val64, &bar0->rx_traffic_int);
2854 	writeq(val64, &bar0->tx_traffic_int);
2855 
	/* We need to free up the transmitted skbs, or else netpoll will
	 * run out of skbs and fail, and eventually a netpoll application
	 * such as netdump will fail as well.
	 */
2860 	for (i = 0; i < config->tx_fifo_num; i++)
2861 		tx_intr_handler(&mac_control->fifos[i]);
2862 
2863 	/* check for received packet and indicate up to network */
2864 	for (i = 0; i < config->rx_ring_num; i++) {
2865 		struct ring_info *ring = &mac_control->rings[i];
2866 
2867 		rx_intr_handler(ring, 0);
2868 	}
2869 
2870 	for (i = 0; i < config->rx_ring_num; i++) {
2871 		struct ring_info *ring = &mac_control->rings[i];
2872 
2873 		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2874 			DBG_PRINT(INFO_DBG,
2875 				  "%s: Out of memory in Rx Netpoll!!\n",
2876 				  dev->name);
2877 			break;
2878 		}
2879 	}
2880 	enable_irq(irq);
2881 }
2882 #endif
2883 
2884 /**
2885  *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per ring structure.
 *  @budget: budget for napi processing.
 *  Description:
 *  If the interrupt is because of a received frame, or if the
 *  receive ring contains fresh, as yet un-processed frames, this
 *  function is called. It picks out the RxD at which the last Rx
 *  processing had stopped and sends the skb to the OSM's Rx handler,
 *  then increments the offset.
2894  *  Return Value:
2895  *  No. of napi packets processed.
2896  */
2897 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2898 {
2899 	int get_block, put_block;
2900 	struct rx_curr_get_info get_info, put_info;
2901 	struct RxD_t *rxdp;
2902 	struct sk_buff *skb;
2903 	int pkt_cnt = 0, napi_pkts = 0;
2904 	int i;
2905 	struct RxD1 *rxdp1;
2906 	struct RxD3 *rxdp3;
2907 
2908 	if (budget <= 0)
2909 		return napi_pkts;
2910 
2911 	get_info = ring_data->rx_curr_get_info;
2912 	get_block = get_info.block_index;
2913 	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2914 	put_block = put_info.block_index;
2915 	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2916 
2917 	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If we are next to the put index then it's a
		 * ring full condition
		 */
2922 		if ((get_block == put_block) &&
2923 		    (get_info.offset + 1) == put_info.offset) {
2924 			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2925 				  ring_data->dev->name);
2926 			break;
2927 		}
2928 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2929 		if (skb == NULL) {
2930 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2931 				  ring_data->dev->name);
2932 			return 0;
2933 		}
2934 		if (ring_data->rxd_mode == RXD_MODE_1) {
2935 			rxdp1 = (struct RxD1 *)rxdp;
2936 			pci_unmap_single(ring_data->pdev, (dma_addr_t)
2937 					 rxdp1->Buffer0_ptr,
2938 					 ring_data->mtu +
2939 					 HEADER_ETHERNET_II_802_3_SIZE +
2940 					 HEADER_802_2_SIZE +
2941 					 HEADER_SNAP_SIZE,
2942 					 PCI_DMA_FROMDEVICE);
2943 		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
2944 			rxdp3 = (struct RxD3 *)rxdp;
2945 			pci_dma_sync_single_for_cpu(ring_data->pdev,
2946 						    (dma_addr_t)rxdp3->Buffer0_ptr,
2947 						    BUF0_LEN,
2948 						    PCI_DMA_FROMDEVICE);
2949 			pci_unmap_single(ring_data->pdev,
2950 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2951 					 ring_data->mtu + 4,
2952 					 PCI_DMA_FROMDEVICE);
2953 		}
2954 		prefetch(skb->data);
2955 		rx_osm_handler(ring_data, rxdp);
2956 		get_info.offset++;
2957 		ring_data->rx_curr_get_info.offset = get_info.offset;
2958 		rxdp = ring_data->rx_blocks[get_block].
2959 			rxds[get_info.offset].virt_addr;
2960 		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2961 			get_info.offset = 0;
2962 			ring_data->rx_curr_get_info.offset = get_info.offset;
2963 			get_block++;
2964 			if (get_block == ring_data->block_count)
2965 				get_block = 0;
2966 			ring_data->rx_curr_get_info.block_index = get_block;
2967 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2968 		}
2969 
2970 		if (ring_data->nic->config.napi) {
2971 			budget--;
2972 			napi_pkts++;
2973 			if (!budget)
2974 				break;
2975 		}
2976 		pkt_cnt++;
2977 		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2978 			break;
2979 	}
2980 	if (ring_data->lro) {
2981 		/* Clear all LRO sessions before exiting */
2982 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2983 			struct lro *lro = &ring_data->lro0_n[i];
2984 			if (lro->in_use) {
2985 				update_L3L4_header(ring_data->nic, lro);
2986 				queue_rx_frame(lro->parent, lro->vlan_tag);
2987 				clear_lro_session(lro);
2988 			}
2989 		}
2990 	}
2991 	return napi_pkts;
2992 }
2993 
2994 /**
2995  *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data : fifo data structure to be processed
 *  Description:
 *  If an interrupt was raised to indicate DMA complete of the
 *  Tx packet, this function is called. It identifies the last TxD
 *  whose buffer was freed and frees all skbs whose data have already
 *  been DMA'ed into the NIC's internal memory.
3002  *  Return Value:
3003  *  NONE
3004  */
3005 
3006 static void tx_intr_handler(struct fifo_info *fifo_data)
3007 {
3008 	struct s2io_nic *nic = fifo_data->nic;
3009 	struct tx_curr_get_info get_info, put_info;
3010 	struct sk_buff *skb = NULL;
3011 	struct TxD *txdlp;
3012 	int pkt_cnt = 0;
3013 	unsigned long flags = 0;
3014 	u8 err_mask;
3015 	struct stat_block *stats = nic->mac_control.stats_info;
3016 	struct swStat *swstats = &stats->sw_stat;
3017 
3018 	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3019 		return;
3020 
3021 	get_info = fifo_data->tx_curr_get_info;
3022 	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3023 	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3024 	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3025 	       (get_info.offset != put_info.offset) &&
3026 	       (txdlp->Host_Control)) {
3027 		/* Check for TxD errors */
3028 		if (txdlp->Control_1 & TXD_T_CODE) {
3029 			unsigned long long err;
3030 			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1)
				swstats->parity_err_cnt++;
3034 
3035 			/* update t_code statistics */
3036 			err_mask = err >> 48;
3037 			switch (err_mask) {
3038 			case 2:
3039 				swstats->tx_buf_abort_cnt++;
3040 				break;
3041 
3042 			case 3:
3043 				swstats->tx_desc_abort_cnt++;
3044 				break;
3045 
3046 			case 7:
3047 				swstats->tx_parity_err_cnt++;
3048 				break;
3049 
3050 			case 10:
3051 				swstats->tx_link_loss_cnt++;
3052 				break;
3053 
3054 			case 15:
3055 				swstats->tx_list_proc_err_cnt++;
3056 				break;
3057 			}
3058 		}
3059 
3060 		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3061 		if (skb == NULL) {
3062 			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3063 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3064 				  __func__);
3065 			return;
3066 		}
3067 		pkt_cnt++;
3068 
3069 		/* Updating the statistics block */
3070 		swstats->mem_freed += skb->truesize;
3071 		dev_kfree_skb_irq(skb);
3072 
3073 		get_info.offset++;
3074 		if (get_info.offset == get_info.fifo_len + 1)
3075 			get_info.offset = 0;
3076 		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3077 		fifo_data->tx_curr_get_info.offset = get_info.offset;
3078 	}
3079 
3080 	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3081 
3082 	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3083 }
3084 
3085 /**
 *  s2io_mdio_write - Function to write into the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @value    : data value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to write values to the MDIO registers.
 *  Return value: NONE
3094  */
3095 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3096 			    struct net_device *dev)
3097 {
3098 	u64 val64;
3099 	struct s2io_nic *sp = netdev_priv(dev);
3100 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3101 
3102 	/* address transaction */
3103 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3104 		MDIO_MMD_DEV_ADDR(mmd_type) |
3105 		MDIO_MMS_PRT_ADDR(0x0);
3106 	writeq(val64, &bar0->mdio_control);
3107 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3108 	writeq(val64, &bar0->mdio_control);
3109 	udelay(100);
3110 
3111 	/* Data transaction */
3112 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3113 		MDIO_MMD_DEV_ADDR(mmd_type) |
3114 		MDIO_MMS_PRT_ADDR(0x0) |
3115 		MDIO_MDIO_DATA(value) |
3116 		MDIO_OP(MDIO_OP_WRITE_TRANS);
3117 	writeq(val64, &bar0->mdio_control);
3118 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3119 	writeq(val64, &bar0->mdio_control);
3120 	udelay(100);
3121 
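	/*
	 * Issue a read transaction of the same register; the result is
	 * discarded, presumably serving to flush or verify the write above.
	 */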
3122 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3123 		MDIO_MMD_DEV_ADDR(mmd_type) |
3124 		MDIO_MMS_PRT_ADDR(0x0) |
3125 		MDIO_OP(MDIO_OP_READ_TRANS);
3126 	writeq(val64, &bar0->mdio_control);
3127 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3128 	writeq(val64, &bar0->mdio_control);
3129 	udelay(100);
3130 }
3131 
3132 /**
 *  s2io_mdio_read - Function to read from the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers.
 *  Return value: the 16-bit value read from the register.
3140  */
3141 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3142 {
3143 	u64 val64 = 0x0;
3144 	u64 rval64 = 0x0;
3145 	struct s2io_nic *sp = netdev_priv(dev);
3146 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3147 
3148 	/* address transaction */
3149 	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3150 			 | MDIO_MMD_DEV_ADDR(mmd_type)
3151 			 | MDIO_MMS_PRT_ADDR(0x0));
3152 	writeq(val64, &bar0->mdio_control);
3153 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3154 	writeq(val64, &bar0->mdio_control);
3155 	udelay(100);
3156 
3157 	/* Data transaction */
3158 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3159 		MDIO_MMD_DEV_ADDR(mmd_type) |
3160 		MDIO_MMS_PRT_ADDR(0x0) |
3161 		MDIO_OP(MDIO_OP_READ_TRANS);
3162 	writeq(val64, &bar0->mdio_control);
3163 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3164 	writeq(val64, &bar0->mdio_control);
3165 	udelay(100);
3166 
3167 	/* Read the value from regs */
3168 	rval64 = readq(&bar0->mdio_control);
3169 	rval64 = rval64 & 0xFFFF0000;
3170 	rval64 = rval64 >> 16;
3171 	return rval64;
3172 }
3173 
3174 /**
 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter      : counter value to be updated
 *  @regs_stat    : packed 2-bit state counters, one field per alarm
 *  @index        : index of this alarm's 2-bit field within @regs_stat
 *  @flag         : flag to indicate the status
 *  @type         : counter type
 *  Description:
 *  This function checks the status of the xpak counters value.
 *  Return value: NONE
3182  */
3183 
3184 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3185 				  u16 flag, u16 type)
3186 {
3187 	u64 mask = 0x3;
3188 	u64 val64;
3189 	int i;
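	/*
	 * Each alarm keeps a 2-bit consecutive-occurrence counter inside
	 * *regs_stat; after three consecutive hits a warning is printed and
	 * the counter resets. Build the mask selecting this alarm's field.
	 */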
3190 	for (i = 0; i < index; i++)
3191 		mask = mask << 0x2;
3192 
3193 	if (flag > 0) {
3194 		*counter = *counter + 1;
3195 		val64 = *regs_stat & mask;
3196 		val64 = val64 >> (index * 0x2);
3197 		val64 = val64 + 1;
3198 		if (val64 == 3) {
			switch (type) {
			case 1:
				DBG_PRINT(ERR_DBG,
					  "Take Xframe NIC out of service.\n");
				DBG_PRINT(ERR_DBG,
					  "Excessive temperatures may result in premature transceiver failure.\n");
				break;
			case 2:
				DBG_PRINT(ERR_DBG,
					  "Take Xframe NIC out of service.\n");
				DBG_PRINT(ERR_DBG,
					  "Excessive bias currents may indicate imminent laser diode failure.\n");
				break;
			case 3:
				DBG_PRINT(ERR_DBG,
					  "Take Xframe NIC out of service.\n");
				DBG_PRINT(ERR_DBG,
					  "Excessive laser output power may saturate far-end receiver.\n");
				break;
			default:
				DBG_PRINT(ERR_DBG,
					  "Incorrect XPAK Alarm type\n");
			}
3222 			val64 = 0x0;
3223 		}
3224 		val64 = val64 << (index * 0x2);
3225 		*regs_stat = (*regs_stat & (~mask)) | (val64);
3226 
3227 	} else {
3228 		*regs_stat = *regs_stat & (~mask);
3229 	}
3230 }
3231 
3232 /**
 *  s2io_updt_xpak_counter - Function to update the xpak counters
 *  @dev         : pointer to net_device struct
 *  Description:
 *  This function updates the status of the xpak counters.
 *  Return value: NONE
3238  */
3239 static void s2io_updt_xpak_counter(struct net_device *dev)
3240 {
3241 	u16 flag  = 0x0;
3242 	u16 type  = 0x0;
3243 	u16 val16 = 0x0;
3244 	u64 val64 = 0x0;
3245 	u64 addr  = 0x0;
3246 
3247 	struct s2io_nic *sp = netdev_priv(dev);
3248 	struct stat_block *stats = sp->mac_control.stats_info;
3249 	struct xpakStat *xstats = &stats->xpak_stat;
3250 
3251 	/* Check the communication with the MDIO slave */
3252 	addr = MDIO_CTRL1;
3253 	val64 = 0x0;
3254 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3255 	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3256 		DBG_PRINT(ERR_DBG,
3257 			  "ERR: MDIO slave access failed - Returned %llx\n",
3258 			  (unsigned long long)val64);
3259 		return;
3260 	}
3261 
3262 	/* Check for the expected value of control reg 1 */
3263 	if (val64 != MDIO_CTRL1_SPEED10G) {
3264 		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3265 			  "Returned: %llx- Expected: 0x%x\n",
3266 			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3267 		return;
3268 	}
3269 
3270 	/* Loading the DOM register to MDIO register */
3271 	addr = 0xA100;
3272 	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3273 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3274 
3275 	/* Reading the Alarm flags */
3276 	addr = 0xA070;
3277 	val64 = 0x0;
3278 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3279 
3280 	flag = CHECKBIT(val64, 0x7);
3281 	type = 1;
3282 	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3283 			      &xstats->xpak_regs_stat,
3284 			      0x0, flag, type);
3285 
3286 	if (CHECKBIT(val64, 0x6))
3287 		xstats->alarm_transceiver_temp_low++;
3288 
3289 	flag = CHECKBIT(val64, 0x3);
3290 	type = 2;
3291 	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3292 			      &xstats->xpak_regs_stat,
3293 			      0x2, flag, type);
3294 
3295 	if (CHECKBIT(val64, 0x2))
3296 		xstats->alarm_laser_bias_current_low++;
3297 
3298 	flag = CHECKBIT(val64, 0x1);
3299 	type = 3;
3300 	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3301 			      &xstats->xpak_regs_stat,
3302 			      0x4, flag, type);
3303 
3304 	if (CHECKBIT(val64, 0x0))
3305 		xstats->alarm_laser_output_power_low++;
3306 
3307 	/* Reading the Warning flags */
3308 	addr = 0xA074;
3309 	val64 = 0x0;
3310 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3311 
3312 	if (CHECKBIT(val64, 0x7))
3313 		xstats->warn_transceiver_temp_high++;
3314 
3315 	if (CHECKBIT(val64, 0x6))
3316 		xstats->warn_transceiver_temp_low++;
3317 
3318 	if (CHECKBIT(val64, 0x3))
3319 		xstats->warn_laser_bias_current_high++;
3320 
3321 	if (CHECKBIT(val64, 0x2))
3322 		xstats->warn_laser_bias_current_low++;
3323 
3324 	if (CHECKBIT(val64, 0x1))
3325 		xstats->warn_laser_output_power_high++;
3326 
3327 	if (CHECKBIT(val64, 0x0))
3328 		xstats->warn_laser_output_power_low++;
3329 }
3330 
3331 /**
 *  wait_for_cmd_complete - waits for a command to complete.
 *  @addr : address of the register to poll.
 *  @busy_bit : bit within the register that indicates command busy.
 *  @bit_state : S2IO_BIT_RESET or S2IO_BIT_SET, the state to wait for.
 *  Description: Function that waits for a command written to the RMAC
 *  ADDR DATA registers to complete, polling for up to roughly half a
 *  second before giving up.
 *  Return value:
 *   SUCCESS on success and FAILURE on failure.
3340  */
3341 
3342 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3343 				 int bit_state)
3344 {
3345 	int ret = FAILURE, cnt = 0, delay = 1;
3346 	u64 val64;
3347 
3348 	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3349 		return FAILURE;
3350 
3351 	do {
3352 		val64 = readq(addr);
3353 		if (bit_state == S2IO_BIT_RESET) {
3354 			if (!(val64 & busy_bit)) {
3355 				ret = SUCCESS;
3356 				break;
3357 			}
3358 		} else {
3359 			if (val64 & busy_bit) {
3360 				ret = SUCCESS;
3361 				break;
3362 			}
3363 		}
3364 
3365 		if (in_interrupt())
3366 			mdelay(delay);
3367 		else
3368 			msleep(delay);
3369 
3370 		if (++cnt >= 10)
3371 			delay = 50;
3372 	} while (cnt < 20);
3373 	return ret;
}

3375 /**
3376  * check_pci_device_id - Checks if the device id is supported
3377  * @id : device id
3378  * Description: Function to check if the pci device id is supported by driver.
 * Return value: device type (XFRAME_I_DEVICE or XFRAME_II_DEVICE) if
 * supported, else PCI_ANY_ID
3380  */
3381 static u16 check_pci_device_id(u16 id)
3382 {
3383 	switch (id) {
3384 	case PCI_DEVICE_ID_HERC_WIN:
3385 	case PCI_DEVICE_ID_HERC_UNI:
3386 		return XFRAME_II_DEVICE;
3387 	case PCI_DEVICE_ID_S2IO_UNI:
3388 	case PCI_DEVICE_ID_S2IO_WIN:
3389 		return XFRAME_I_DEVICE;
3390 	default:
3391 		return PCI_ANY_ID;
3392 	}
3393 }
3394 
3395 /**
3396  *  s2io_reset - Resets the card.
3397  *  @sp : private member of the device structure.
 *  Description: Function to reset the card. It also restores the
 *  previously saved PCI configuration space registers, since the card
 *  reset also resets the configuration space.
3401  *  Return value:
3402  *  void.
3403  */
3404 
3405 static void s2io_reset(struct s2io_nic *sp)
3406 {
3407 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3408 	u64 val64;
3409 	u16 subid, pci_cmd;
3410 	int i;
3411 	u16 val16;
3412 	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3413 	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3414 	struct stat_block *stats;
3415 	struct swStat *swstats;
3416 
3417 	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3418 		  __func__, pci_name(sp->pdev));
3419 
	/* Back up the PCI-X CMD reg; we don't want to lose MMRBC, OST settings */
3421 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3422 
3423 	val64 = SW_RESET_ALL;
3424 	writeq(val64, &bar0->sw_reset);
3425 	if (strstr(sp->product_name, "CX4"))
3426 		msleep(750);
3427 	msleep(250);
3428 	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3429 
3430 		/* Restore the PCI state saved during initialization. */
3431 		pci_restore_state(sp->pdev);
3432 		pci_save_state(sp->pdev);
3433 		pci_read_config_word(sp->pdev, 0x2, &val16);
3434 		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3435 			break;
3436 		msleep(200);
3437 	}
3438 
3439 	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3440 		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3441 
3442 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3443 
3444 	s2io_init_pci(sp);
3445 
3446 	/* Set swapper to enable I/O register access */
3447 	s2io_set_swapper(sp);
3448 
3449 	/* restore mac_addr entries */
3450 	do_s2io_restore_unicast_mc(sp);
3451 
3452 	/* Restore the MSIX table entries from local variables */
3453 	restore_xmsi_data(sp);
3454 
3455 	/* Clear certain PCI/PCI-X fields after reset */
3456 	if (sp->device_type == XFRAME_II_DEVICE) {
3457 		/* Clear "detected parity error" bit */
3458 		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3459 
3460 		/* Clearing PCIX Ecc status register */
3461 		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3462 
3463 		/* Clearing PCI_STATUS error reflected here */
3464 		writeq(s2BIT(62), &bar0->txpic_int_reg);
3465 	}
3466 
3467 	/* Reset device statistics maintained by OS */
3468 	memset(&sp->stats, 0, sizeof(struct net_device_stats));
3469 
3470 	stats = sp->mac_control.stats_info;
3471 	swstats = &stats->sw_stat;
3472 
3473 	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3474 	up_cnt = swstats->link_up_cnt;
3475 	down_cnt = swstats->link_down_cnt;
3476 	up_time = swstats->link_up_time;
3477 	down_time = swstats->link_down_time;
3478 	reset_cnt = swstats->soft_reset_cnt;
3479 	mem_alloc_cnt = swstats->mem_allocated;
3480 	mem_free_cnt = swstats->mem_freed;
3481 	watchdog_cnt = swstats->watchdog_timer_cnt;
3482 
3483 	memset(stats, 0, sizeof(struct stat_block));
3484 
3485 	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3486 	swstats->link_up_cnt = up_cnt;
3487 	swstats->link_down_cnt = down_cnt;
3488 	swstats->link_up_time = up_time;
3489 	swstats->link_down_time = down_time;
3490 	swstats->soft_reset_cnt = reset_cnt;
3491 	swstats->mem_allocated = mem_alloc_cnt;
3492 	swstats->mem_freed = mem_free_cnt;
3493 	swstats->watchdog_timer_cnt = watchdog_cnt;
3494 
3495 	/* SXE-002: Configure link and activity LED to turn it off */
3496 	subid = sp->pdev->subsystem_device;
3497 	if (((subid & 0xFF) >= 0x07) &&
3498 	    (sp->device_type == XFRAME_I_DEVICE)) {
3499 		val64 = readq(&bar0->gpio_control);
3500 		val64 |= 0x0000800000000000ULL;
3501 		writeq(val64, &bar0->gpio_control);
3502 		val64 = 0x0411040400000000ULL;
3503 		writeq(val64, (void __iomem *)bar0 + 0x2700);
3504 	}
3505 
3506 	/*
3507 	 * Clear spurious ECC interrupts that would have occurred on
3508 	 * XFRAME II cards after reset.
3509 	 */
3510 	if (sp->device_type == XFRAME_II_DEVICE) {
3511 		val64 = readq(&bar0->pcc_err_reg);
3512 		writeq(val64, &bar0->pcc_err_reg);
3513 	}
3514 
3515 	sp->device_enabled_once = false;
3516 }
3517 
3518 /**
 *  s2io_set_swapper - to set the swapper control on the card
3520  *  @sp : private member of the device structure,
3521  *  pointer to the s2io_nic structure.
3522  *  Description: Function to set the swapper control on the card
3523  *  correctly depending on the 'endianness' of the system.
3524  *  Return value:
3525  *  SUCCESS on success and FAILURE on failure.
3526  */
3527 
3528 static int s2io_set_swapper(struct s2io_nic *sp)
3529 {
3530 	struct net_device *dev = sp->dev;
3531 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3532 	u64 val64, valt, valr;
3533 
3534 	/*
3535 	 * Set proper endian settings and verify the same by reading
3536 	 * the PIF Feed-back register.
3537 	 */
3538 
3539 	val64 = readq(&bar0->pif_rd_swapper_fb);
3540 	if (val64 != 0x0123456789ABCDEFULL) {
3541 		int i = 0;
3542 		static const u64 value[] = {
3543 			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
3544 			0x8100008181000081ULL,	/* FE=1, SE=0 */
3545 			0x4200004242000042ULL,	/* FE=0, SE=1 */
3546 			0			/* FE=0, SE=0 */
3547 		};
3548 
3549 		while (i < 4) {
3550 			writeq(value[i], &bar0->swapper_ctrl);
3551 			val64 = readq(&bar0->pif_rd_swapper_fb);
3552 			if (val64 == 0x0123456789ABCDEFULL)
3553 				break;
3554 			i++;
3555 		}
3556 		if (i == 4) {
3557 			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3558 				  "feedback read %llx\n",
3559 				  dev->name, (unsigned long long)val64);
3560 			return FAILURE;
3561 		}
3562 		valr = value[i];
3563 	} else {
3564 		valr = readq(&bar0->swapper_ctrl);
3565 	}
3566 
3567 	valt = 0x0123456789ABCDEFULL;
3568 	writeq(valt, &bar0->xmsi_address);
3569 	val64 = readq(&bar0->xmsi_address);
3570 
3571 	if (val64 != valt) {
3572 		int i = 0;
3573 		static const u64 value[] = {
3574 			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
3575 			0x0081810000818100ULL,	/* FE=1, SE=0 */
3576 			0x0042420000424200ULL,	/* FE=0, SE=1 */
3577 			0			/* FE=0, SE=0 */
3578 		};
3579 
3580 		while (i < 4) {
3581 			writeq((value[i] | valr), &bar0->swapper_ctrl);
3582 			writeq(valt, &bar0->xmsi_address);
3583 			val64 = readq(&bar0->xmsi_address);
3584 			if (val64 == valt)
3585 				break;
3586 			i++;
3587 		}
3588 		if (i == 4) {
3589 			unsigned long long x = val64;
3590 			DBG_PRINT(ERR_DBG,
3591 				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
3592 			return FAILURE;
3593 		}
3594 	}
3595 	val64 = readq(&bar0->swapper_ctrl);
3596 	val64 &= 0xFFFF000000000000ULL;
3597 
3598 #ifdef __BIG_ENDIAN
3599 	/*
	 * The device is set to a big endian format by default, so a
	 * big endian driver need not set anything.
3602 	 */
3603 	val64 |= (SWAPPER_CTRL_TXP_FE |
3604 		  SWAPPER_CTRL_TXP_SE |
3605 		  SWAPPER_CTRL_TXD_R_FE |
3606 		  SWAPPER_CTRL_TXD_W_FE |
3607 		  SWAPPER_CTRL_TXF_R_FE |
3608 		  SWAPPER_CTRL_RXD_R_FE |
3609 		  SWAPPER_CTRL_RXD_W_FE |
3610 		  SWAPPER_CTRL_RXF_W_FE |
3611 		  SWAPPER_CTRL_XMSI_FE |
3612 		  SWAPPER_CTRL_STATS_FE |
3613 		  SWAPPER_CTRL_STATS_SE);
3614 	if (sp->config.intr_type == INTA)
3615 		val64 |= SWAPPER_CTRL_XMSI_SE;
3616 	writeq(val64, &bar0->swapper_ctrl);
3617 #else
3618 	/*
3619 	 * Initially we enable all bits to make it accessible by the
3620 	 * driver, then we selectively enable only those bits that
3621 	 * we want to set.
3622 	 */
3623 	val64 |= (SWAPPER_CTRL_TXP_FE |
3624 		  SWAPPER_CTRL_TXP_SE |
3625 		  SWAPPER_CTRL_TXD_R_FE |
3626 		  SWAPPER_CTRL_TXD_R_SE |
3627 		  SWAPPER_CTRL_TXD_W_FE |
3628 		  SWAPPER_CTRL_TXD_W_SE |
3629 		  SWAPPER_CTRL_TXF_R_FE |
3630 		  SWAPPER_CTRL_RXD_R_FE |
3631 		  SWAPPER_CTRL_RXD_R_SE |
3632 		  SWAPPER_CTRL_RXD_W_FE |
3633 		  SWAPPER_CTRL_RXD_W_SE |
3634 		  SWAPPER_CTRL_RXF_W_FE |
3635 		  SWAPPER_CTRL_XMSI_FE |
3636 		  SWAPPER_CTRL_STATS_FE |
3637 		  SWAPPER_CTRL_STATS_SE);
3638 	if (sp->config.intr_type == INTA)
3639 		val64 |= SWAPPER_CTRL_XMSI_SE;
3640 	writeq(val64, &bar0->swapper_ctrl);
3641 #endif
3642 	val64 = readq(&bar0->swapper_ctrl);
3643 
3644 	/*
3645 	 * Verifying if endian settings are accurate by reading a
3646 	 * feedback register.
3647 	 */
3648 	val64 = readq(&bar0->pif_rd_swapper_fb);
3649 	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect even after reprogramming. */
3651 		DBG_PRINT(ERR_DBG,
3652 			  "%s: Endian settings are wrong, feedback read %llx\n",
3653 			  dev->name, (unsigned long long)val64);
3654 		return FAILURE;
3655 	}
3656 
3657 	return SUCCESS;
3658 }
3659 
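/*
 * Poll the xmsi_access register until the strobe bit (s2BIT(15)) clears,
 * giving the MSI-X table transaction up to 5ms to complete.
 * Returns 0 on completion and 1 on timeout.
 */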
3660 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3661 {
3662 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3663 	u64 val64;
3664 	int ret = 0, cnt = 0;
3665 
3666 	do {
3667 		val64 = readq(&bar0->xmsi_access);
3668 		if (!(val64 & s2BIT(15)))
3669 			break;
3670 		mdelay(1);
3671 		cnt++;
3672 	} while (cnt < 5);
3673 	if (cnt == 5) {
3674 		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3675 		ret = 1;
3676 	}
3677 
3678 	return ret;
3679 }
3680 
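/*
 * Write the cached MSI-X address/data pairs back into the adapter's
 * XMSI table after a reset (Xframe II only).  Entry 0 is the alarm
 * vector; the ring vectors sit at table indices 1, 9, 17, ...
 */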
3681 static void restore_xmsi_data(struct s2io_nic *nic)
3682 {
3683 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3684 	u64 val64;
3685 	int i, msix_index;
3686 
3687 	if (nic->device_type == XFRAME_I_DEVICE)
3688 		return;
3689 
3690 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3691 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3692 		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3693 		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3694 		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3695 		writeq(val64, &bar0->xmsi_access);
3696 		if (wait_for_msix_trans(nic, msix_index)) {
3697 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3698 				  __func__, msix_index);
3699 			continue;
3700 		}
3701 	}
3702 }
3703 
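/*
 * Read the MSI-X address/data pairs programmed into the adapter's
 * XMSI table and cache them in msix_info[], so restore_xmsi_data()
 * can reprogram them after a card reset.
 */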
3704 static void store_xmsi_data(struct s2io_nic *nic)
3705 {
3706 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3707 	u64 val64, addr, data;
3708 	int i, msix_index;
3709 
3710 	if (nic->device_type == XFRAME_I_DEVICE)
3711 		return;
3712 
3713 	/* Store and display */
3714 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3715 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3716 		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3717 		writeq(val64, &bar0->xmsi_access);
3718 		if (wait_for_msix_trans(nic, msix_index)) {
3719 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3720 				  __func__, msix_index);
3721 			continue;
3722 		}
3723 		addr = readq(&bar0->xmsi_address);
3724 		data = readq(&bar0->xmsi_data);
3725 		if (addr && data) {
3726 			nic->msix_info[i].addr = addr;
3727 			nic->msix_info[i].data = data;
3728 		}
3729 	}
3730 }
3731 
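/*
 * Set up MSI-X: allocate the vector tables, bind vector 0 to alarm and
 * Tx handling (its arg is the FIFO array) and one vector to each Rx
 * ring through the rx_mat register, then enable MSI-X on the function.
 * Returns 0 on success or -ENOMEM on failure.
 */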
3732 static int s2io_enable_msi_x(struct s2io_nic *nic)
3733 {
3734 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3735 	u64 rx_mat;
3736 	u16 msi_control; /* Temp variable */
3737 	int ret, i, j, msix_indx = 1;
3738 	int size;
3739 	struct stat_block *stats = nic->mac_control.stats_info;
3740 	struct swStat *swstats = &stats->sw_stat;
3741 
3742 	size = nic->num_entries * sizeof(struct msix_entry);
3743 	nic->entries = kzalloc(size, GFP_KERNEL);
3744 	if (!nic->entries) {
3745 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3746 			  __func__);
3747 		swstats->mem_alloc_fail_cnt++;
3748 		return -ENOMEM;
3749 	}
3750 	swstats->mem_allocated += size;
3751 
3752 	size = nic->num_entries * sizeof(struct s2io_msix_entry);
3753 	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3754 	if (!nic->s2io_entries) {
3755 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3756 			  __func__);
3757 		swstats->mem_alloc_fail_cnt++;
3758 		kfree(nic->entries);
3759 		swstats->mem_freed
3760 			+= (nic->num_entries * sizeof(struct msix_entry));
3761 		return -ENOMEM;
3762 	}
3763 	swstats->mem_allocated += size;
3764 
3765 	nic->entries[0].entry = 0;
3766 	nic->s2io_entries[0].entry = 0;
3767 	nic->s2io_entries[0].in_use = MSIX_FLG;
3768 	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3769 	nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3770 
3771 	for (i = 1; i < nic->num_entries; i++) {
3772 		nic->entries[i].entry = ((i - 1) * 8) + 1;
3773 		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3774 		nic->s2io_entries[i].arg = NULL;
3775 		nic->s2io_entries[i].in_use = 0;
3776 	}
3777 
3778 	rx_mat = readq(&bar0->rx_mat);
3779 	for (j = 0; j < nic->config.rx_ring_num; j++) {
3780 		rx_mat |= RX_MAT_SET(j, msix_indx);
3781 		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3782 		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3783 		nic->s2io_entries[j+1].in_use = MSIX_FLG;
3784 		msix_indx += 8;
3785 	}
3786 	writeq(rx_mat, &bar0->rx_mat);
3787 	readq(&bar0->rx_mat);
3788 
3789 	ret = pci_enable_msix_range(nic->pdev, nic->entries,
3790 				    nic->num_entries, nic->num_entries);
3791 	/* We fail init if error or we get less vectors than min required */
3792 	if (ret < 0) {
3793 		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3794 		kfree(nic->entries);
3795 		swstats->mem_freed += nic->num_entries *
3796 			sizeof(struct msix_entry);
3797 		kfree(nic->s2io_entries);
3798 		swstats->mem_freed += nic->num_entries *
3799 			sizeof(struct s2io_msix_entry);
3800 		nic->entries = NULL;
3801 		nic->s2io_entries = NULL;
3802 		return -ENOMEM;
3803 	}
3804 
3805 	/*
3806 	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3807 	 * in the herc NIC. (Temp change, needs to be removed later)
3808 	 */
3809 	pci_read_config_word(nic->pdev, 0x42, &msi_control);
3810 	msi_control |= 0x1; /* Enable MSI */
3811 	pci_write_config_word(nic->pdev, 0x42, msi_control);
3812 
3813 	return 0;
3814 }
3815 
3816 /* Handle software interrupt used during MSI(X) test */
3817 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3818 {
3819 	struct s2io_nic *sp = dev_id;
3820 
3821 	sp->msi_detected = 1;
3822 	wake_up(&sp->msi_wait);
3823 
3824 	return IRQ_HANDLED;
3825 }
3826 
/*
 * Test the interrupt path by forcing a software IRQ: program the
 * scheduler timer to fire one shot through MSI-X vector 1 and wait up
 * to 100ms for the test handler to run.
 */
3828 static int s2io_test_msi(struct s2io_nic *sp)
3829 {
3830 	struct pci_dev *pdev = sp->pdev;
3831 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3832 	int err;
3833 	u64 val64, saved64;
3834 
3835 	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3836 			  sp->name, sp);
3837 	if (err) {
3838 		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3839 			  sp->dev->name, pci_name(pdev), pdev->irq);
3840 		return err;
3841 	}
3842 
3843 	init_waitqueue_head(&sp->msi_wait);
3844 	sp->msi_detected = 0;
3845 
3846 	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3847 	val64 |= SCHED_INT_CTRL_ONE_SHOT;
3848 	val64 |= SCHED_INT_CTRL_TIMER_EN;
3849 	val64 |= SCHED_INT_CTRL_INT2MSI(1);
3850 	writeq(val64, &bar0->scheduled_int_ctrl);
3851 
3852 	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3853 
3854 	if (!sp->msi_detected) {
3855 		/* MSI(X) test failed, go back to INTx mode */
3856 		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3857 			  "using MSI(X) during test\n",
3858 			  sp->dev->name, pci_name(pdev));
3859 
3860 		err = -EOPNOTSUPP;
3861 	}
3862 
3863 	free_irq(sp->entries[1].vector, sp);
3864 
3865 	writeq(saved64, &bar0->scheduled_int_ctrl);
3866 
3867 	return err;
3868 }
3869 
3870 static void remove_msix_isr(struct s2io_nic *sp)
3871 {
3872 	int i;
3873 	u16 msi_control;
3874 
3875 	for (i = 0; i < sp->num_entries; i++) {
3876 		if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3877 			int vector = sp->entries[i].vector;
3878 			void *arg = sp->s2io_entries[i].arg;
3879 			free_irq(vector, arg);
3880 		}
3881 	}
3882 
3883 	kfree(sp->entries);
3884 	kfree(sp->s2io_entries);
3885 	sp->entries = NULL;
3886 	sp->s2io_entries = NULL;
3887 
3888 	pci_read_config_word(sp->pdev, 0x42, &msi_control);
3889 	msi_control &= 0xFFFE; /* Disable MSI */
3890 	pci_write_config_word(sp->pdev, 0x42, msi_control);
3891 
3892 	pci_disable_msix(sp->pdev);
3893 }
3894 
3895 static void remove_inta_isr(struct s2io_nic *sp)
3896 {
3897 	free_irq(sp->pdev->irq, sp->dev);
3898 }
3899 
3900 /* ********************************************************* *
3901  * Functions defined below concern the OS part of the driver *
3902  * ********************************************************* */
3903 
3904 /**
3905  *  s2io_open - open entry point of the driver
3906  *  @dev : pointer to the device structure.
3907  *  Description:
3908  *  This function is the open entry point of the driver. It mainly calls a
3909  *  function to allocate Rx buffers and inserts them into the buffer
3910  *  descriptors and then enables the Rx part of the NIC.
3911  *  Return value:
3912  *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
3914  */
3915 
3916 static int s2io_open(struct net_device *dev)
3917 {
3918 	struct s2io_nic *sp = netdev_priv(dev);
3919 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3920 	int err = 0;
3921 
3922 	/*
3923 	 * Make sure you have link off by default every time
3924 	 * Nic is initialized
3925 	 */
3926 	netif_carrier_off(dev);
3927 	sp->last_link_state = 0;
3928 
3929 	/* Initialize H/W and enable interrupts */
3930 	err = s2io_card_up(sp);
3931 	if (err) {
3932 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3933 			  dev->name);
3934 		goto hw_init_failed;
3935 	}
3936 
3937 	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3938 		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3939 		s2io_card_down(sp);
3940 		err = -ENODEV;
3941 		goto hw_init_failed;
3942 	}
3943 	s2io_start_all_tx_queue(sp);
3944 	return 0;
3945 
3946 hw_init_failed:
3947 	if (sp->config.intr_type == MSI_X) {
3948 		if (sp->entries) {
3949 			kfree(sp->entries);
3950 			swstats->mem_freed += sp->num_entries *
3951 				sizeof(struct msix_entry);
3952 		}
3953 		if (sp->s2io_entries) {
3954 			kfree(sp->s2io_entries);
3955 			swstats->mem_freed += sp->num_entries *
3956 				sizeof(struct s2io_msix_entry);
3957 		}
3958 	}
3959 	return err;
3960 }
3961 
3962 /**
 *  s2io_close - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver. It needs to undo exactly
 *  whatever was done by the open entry point, thus it's usually referred
 *  to as the close function. Among other things this function mainly
 *  stops the Rx side of the NIC and frees all the Rx buffers in the Rx
 *  rings.
3970  *  Return value:
3971  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3972  *  file on failure.
3973  */
3974 
3975 static int s2io_close(struct net_device *dev)
3976 {
3977 	struct s2io_nic *sp = netdev_priv(dev);
3978 	struct config_param *config = &sp->config;
3979 	u64 tmp64;
3980 	int offset;
3981 
	/*
	 * Return if the device is already closed.
	 * Can happen when s2io_card_up failed in change_mtu.
	 */
3985 	if (!is_s2io_card_up(sp))
3986 		return 0;
3987 
3988 	s2io_stop_all_tx_queue(sp);
3989 	/* delete all populated mac entries */
3990 	for (offset = 1; offset < config->max_mc_addr; offset++) {
3991 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
3992 		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3993 			do_s2io_delete_unicast_mc(sp, tmp64);
3994 	}
3995 
3996 	s2io_card_down(sp);
3997 
3998 	return 0;
3999 }
4000 
4001 /**
 *  s2io_xmit - Tx entry point of the driver
 *  @skb : the socket buffer containing the Tx data.
 *  @dev : device pointer.
 *  Description :
 *  This function is the Tx entry point of the driver. S2IO NIC supports
 *  certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 *  NOTE: when the device can't queue the pkt, just the trans_start
 *  variable will not be updated.
 *  Return value:
 *  NETDEV_TX_OK on success; NETDEV_TX_BUSY or NETDEV_TX_LOCKED when the
 *  packet could not be queued.
4012  */
4013 
4014 static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4015 {
4016 	struct s2io_nic *sp = netdev_priv(dev);
4017 	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4018 	register u64 val64;
4019 	struct TxD *txdp;
4020 	struct TxFIFO_element __iomem *tx_fifo;
4021 	unsigned long flags = 0;
4022 	u16 vlan_tag = 0;
4023 	struct fifo_info *fifo = NULL;
4024 	int do_spin_lock = 1;
4025 	int offload_type;
4026 	int enable_per_list_interrupt = 0;
4027 	struct config_param *config = &sp->config;
4028 	struct mac_info *mac_control = &sp->mac_control;
4029 	struct stat_block *stats = mac_control->stats_info;
4030 	struct swStat *swstats = &stats->sw_stat;
4031 
4032 	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4033 
4034 	if (unlikely(skb->len <= 0)) {
4035 		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4036 		dev_kfree_skb_any(skb);
4037 		return NETDEV_TX_OK;
4038 	}
4039 
4040 	if (!is_s2io_card_up(sp)) {
4041 		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4042 			  dev->name);
4043 		dev_kfree_skb_any(skb);
4044 		return NETDEV_TX_OK;
4045 	}
4046 
4047 	queue = 0;
4048 	if (skb_vlan_tag_present(skb))
4049 		vlan_tag = skb_vlan_tag_get(skb);
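	/*
	 * Default Tx steering hashes the TCP/UDP source + dest ports into
	 * a FIFO index (the tcphdr cast also covers UDP, since the two
	 * port fields lead both headers); UDP frames are then offset into
	 * the dedicated UDP FIFO range and use a trylock instead of a
	 * blocking spin_lock.
	 */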
4050 	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4051 		if (skb->protocol == htons(ETH_P_IP)) {
4052 			struct iphdr *ip;
4053 			struct tcphdr *th;
4054 			ip = ip_hdr(skb);
4055 
4056 			if (!ip_is_fragment(ip)) {
4057 				th = (struct tcphdr *)(((unsigned char *)ip) +
4058 						       ip->ihl*4);
4059 
4060 				if (ip->protocol == IPPROTO_TCP) {
4061 					queue_len = sp->total_tcp_fifos;
4062 					queue = (ntohs(th->source) +
4063 						 ntohs(th->dest)) &
4064 						sp->fifo_selector[queue_len - 1];
4065 					if (queue >= queue_len)
4066 						queue = queue_len - 1;
4067 				} else if (ip->protocol == IPPROTO_UDP) {
4068 					queue_len = sp->total_udp_fifos;
4069 					queue = (ntohs(th->source) +
4070 						 ntohs(th->dest)) &
4071 						sp->fifo_selector[queue_len - 1];
4072 					if (queue >= queue_len)
4073 						queue = queue_len - 1;
4074 					queue += sp->udp_fifo_idx;
4075 					if (skb->len > 1024)
4076 						enable_per_list_interrupt = 1;
4077 					do_spin_lock = 0;
4078 				}
4079 			}
4080 		}
4081 	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4082 		/* get fifo number based on skb->priority value */
4083 		queue = config->fifo_mapping
4084 			[skb->priority & (MAX_TX_FIFOS - 1)];
4085 	fifo = &mac_control->fifos[queue];
4086 
4087 	if (do_spin_lock)
4088 		spin_lock_irqsave(&fifo->tx_lock, flags);
4089 	else {
4090 		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4091 			return NETDEV_TX_LOCKED;
4092 	}
4093 
4094 	if (sp->config.multiq) {
4095 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4096 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4097 			return NETDEV_TX_BUSY;
4098 		}
4099 	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4100 		if (netif_queue_stopped(dev)) {
4101 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4102 			return NETDEV_TX_BUSY;
4103 		}
4104 	}
4105 
4106 	put_off = (u16)fifo->tx_curr_put_info.offset;
4107 	get_off = (u16)fifo->tx_curr_get_info.offset;
4108 	txdp = fifo->list_info[put_off].list_virt_addr;
4109 
4110 	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4111 	/* Avoid "put" pointer going beyond "get" pointer */
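	/*
	 * The FIFO is full when advancing "put" by one (with wrap-around)
	 * would land on "get": e.g. with queue_len 8, put_off 7 wraps to
	 * 0, and if get_off is 0 no TxD is free.
	 */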
4112 	if (txdp->Host_Control ||
4113 	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4114 		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4115 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4116 		dev_kfree_skb_any(skb);
4117 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
4118 		return NETDEV_TX_OK;
4119 	}
4120 
4121 	offload_type = s2io_offload_type(skb);
4122 	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4123 		txdp->Control_1 |= TXD_TCP_LSO_EN;
4124 		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4125 	}
4126 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
4127 		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4128 				    TXD_TX_CKO_TCP_EN |
4129 				    TXD_TX_CKO_UDP_EN);
4130 	}
4131 	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4132 	txdp->Control_1 |= TXD_LIST_OWN_XENA;
4133 	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
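	/*
	 * Request a per-list completion interrupt only on a subset of
	 * descriptors (put_off & (queue_len >> 5) non-zero), which
	 * throttles completion interrupts for the large-UDP case that
	 * sets enable_per_list_interrupt.
	 */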
4134 	if (enable_per_list_interrupt)
4135 		if (put_off & (queue_len >> 5))
4136 			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4137 	if (vlan_tag) {
4138 		txdp->Control_2 |= TXD_VLAN_ENABLE;
4139 		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4140 	}
4141 
4142 	frg_len = skb_headlen(skb);
4143 	if (offload_type == SKB_GSO_UDP) {
4144 		int ufo_size;
4145 
4146 		ufo_size = s2io_udp_mss(skb);
4147 		ufo_size &= ~7;
4148 		txdp->Control_1 |= TXD_UFO_EN;
4149 		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4150 		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4151 #ifdef __BIG_ENDIAN
4152 		/* both variants do cpu_to_be64(be32_to_cpu(...)) */
4153 		fifo->ufo_in_band_v[put_off] =
4154 			(__force u64)skb_shinfo(skb)->ip6_frag_id;
4155 #else
4156 		fifo->ufo_in_band_v[put_off] =
4157 			(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4158 #endif
4159 		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4160 		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4161 						      fifo->ufo_in_band_v,
4162 						      sizeof(u64),
4163 						      PCI_DMA_TODEVICE);
4164 		if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4165 			goto pci_map_failed;
4166 		txdp++;
4167 	}
4168 
4169 	txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4170 					      frg_len, PCI_DMA_TODEVICE);
4171 	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4172 		goto pci_map_failed;
4173 
4174 	txdp->Host_Control = (unsigned long)skb;
4175 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4176 	if (offload_type == SKB_GSO_UDP)
4177 		txdp->Control_1 |= TXD_UFO_EN;
4178 
4179 	frg_cnt = skb_shinfo(skb)->nr_frags;
4180 	/* For fragmented SKB. */
4181 	for (i = 0; i < frg_cnt; i++) {
4182 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4183 		/* A '0' length fragment will be ignored */
4184 		if (!skb_frag_size(frag))
4185 			continue;
4186 		txdp++;
4187 		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4188 							     frag, 0,
4189 							     skb_frag_size(frag),
4190 							     DMA_TO_DEVICE);
4191 		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4192 		if (offload_type == SKB_GSO_UDP)
4193 			txdp->Control_1 |= TXD_UFO_EN;
4194 	}
4195 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4196 
4197 	if (offload_type == SKB_GSO_UDP)
4198 		frg_cnt++; /* as Txd0 was used for inband header */
4199 
4200 	tx_fifo = mac_control->tx_FIFO_start[queue];
4201 	val64 = fifo->list_info[put_off].list_phy_addr;
4202 	writeq(val64, &tx_fifo->TxDL_Pointer);
4203 
4204 	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4205 		 TX_FIFO_LAST_LIST);
4206 	if (offload_type)
4207 		val64 |= TX_FIFO_SPECIAL_FUNC;
4208 
4209 	writeq(val64, &tx_fifo->List_Control);
4210 
4211 	mmiowb();
4212 
4213 	put_off++;
4214 	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4215 		put_off = 0;
4216 	fifo->tx_curr_put_info.offset = put_off;
4217 
4218 	/* Avoid "put" pointer going beyond "get" pointer */
4219 	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4220 		swstats->fifo_full_cnt++;
4221 		DBG_PRINT(TX_DBG,
4222 			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4223 			  put_off, get_off);
4224 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4225 	}
4226 	swstats->mem_allocated += skb->truesize;
4227 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4228 
4229 	if (sp->config.intr_type == MSI_X)
4230 		tx_intr_handler(fifo);
4231 
4232 	return NETDEV_TX_OK;
4233 
4234 pci_map_failed:
4235 	swstats->pci_map_fail_cnt++;
4236 	s2io_stop_tx_queue(sp, fifo->fifo_no);
4237 	swstats->mem_freed += skb->truesize;
4238 	dev_kfree_skb_any(skb);
4239 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4240 	return NETDEV_TX_OK;
4241 }
4242 
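/*
 * Periodic timer callback: scans the adapter for alarm/error conditions
 * via s2io_handle_errors() and re-arms itself to fire again in half a
 * second.
 */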
4243 static void
4244 s2io_alarm_handle(unsigned long data)
4245 {
4246 	struct s2io_nic *sp = (struct s2io_nic *)data;
4247 	struct net_device *dev = sp->dev;
4248 
4249 	s2io_handle_errors(dev);
4250 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4251 }
4252 
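/*
 * Per-ring MSI-X Rx interrupt handler.  Under NAPI the ring's vector is
 * masked in xmsi_mask_reg and polling is scheduled; otherwise the ring
 * is drained inline and its Rx buffers are replenished.
 */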
4253 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4254 {
4255 	struct ring_info *ring = (struct ring_info *)dev_id;
4256 	struct s2io_nic *sp = ring->nic;
4257 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4258 
4259 	if (unlikely(!is_s2io_card_up(sp)))
4260 		return IRQ_HANDLED;
4261 
4262 	if (sp->config.napi) {
4263 		u8 __iomem *addr = NULL;
4264 		u8 val8 = 0;
4265 
4266 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4267 		addr += (7 - ring->ring_no);
4268 		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4269 		writeb(val8, addr);
4270 		val8 = readb(addr);
4271 		napi_schedule(&ring->napi);
4272 	} else {
4273 		rx_intr_handler(ring, 0);
4274 		s2io_chk_rx_buffers(sp, ring);
4275 	}
4276 
4277 	return IRQ_HANDLED;
4278 }
4279 
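/*
 * MSI-X alarm/Tx vector handler: masks the general interrupt, services
 * TXPIC and Tx traffic interrupts for all FIFOs, then restores the
 * saved interrupt mask.  Returns IRQ_NONE if the interrupt was not
 * raised by this device.
 */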
4280 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4281 {
4282 	int i;
4283 	struct fifo_info *fifos = (struct fifo_info *)dev_id;
4284 	struct s2io_nic *sp = fifos->nic;
4285 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4286 	struct config_param *config  = &sp->config;
4287 	u64 reason;
4288 
4289 	if (unlikely(!is_s2io_card_up(sp)))
4290 		return IRQ_NONE;
4291 
4292 	reason = readq(&bar0->general_int_status);
4293 	if (unlikely(reason == S2IO_MINUS_ONE))
4294 		/* Nothing much can be done. Get out */
4295 		return IRQ_HANDLED;
4296 
4297 	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4298 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4299 
4300 		if (reason & GEN_INTR_TXPIC)
4301 			s2io_txpic_intr_handle(sp);
4302 
4303 		if (reason & GEN_INTR_TXTRAFFIC)
4304 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4305 
4306 		for (i = 0; i < config->tx_fifo_num; i++)
4307 			tx_intr_handler(&fifos[i]);
4308 
4309 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4310 		readl(&bar0->general_int_status);
4311 		return IRQ_HANDLED;
4312 	}
4313 	/* The interrupt was not raised by us */
4314 	return IRQ_NONE;
4315 }
4316 
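/*
 * Handle TXPIC interrupts, chiefly the GPIO link-state change bits: on
 * link-up the adapter and LED are enabled, on link-down the LED is
 * turned off, and in each case the up/down interrupt masks are swapped
 * so that only the opposite transition can fire next.
 */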
4317 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4318 {
4319 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4320 	u64 val64;
4321 
4322 	val64 = readq(&bar0->pic_int_status);
4323 	if (val64 & PIC_INT_GPIO) {
4324 		val64 = readq(&bar0->gpio_int_reg);
4325 		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4326 		    (val64 & GPIO_INT_REG_LINK_UP)) {
4327 			/*
			 * This is an unstable state, so clear both the up and
			 * down interrupts and let the adapter re-evaluate the
			 * link state.
4330 			 */
4331 			val64 |= GPIO_INT_REG_LINK_DOWN;
4332 			val64 |= GPIO_INT_REG_LINK_UP;
4333 			writeq(val64, &bar0->gpio_int_reg);
4334 			val64 = readq(&bar0->gpio_int_mask);
4335 			val64 &= ~(GPIO_INT_MASK_LINK_UP |
4336 				   GPIO_INT_MASK_LINK_DOWN);
4337 			writeq(val64, &bar0->gpio_int_mask);
4338 		} else if (val64 & GPIO_INT_REG_LINK_UP) {
4339 			val64 = readq(&bar0->adapter_status);
4340 			/* Enable Adapter */
4341 			val64 = readq(&bar0->adapter_control);
4342 			val64 |= ADAPTER_CNTL_EN;
4343 			writeq(val64, &bar0->adapter_control);
4344 			val64 |= ADAPTER_LED_ON;
4345 			writeq(val64, &bar0->adapter_control);
4346 			if (!sp->device_enabled_once)
4347 				sp->device_enabled_once = 1;
4348 
4349 			s2io_link(sp, LINK_UP);
4350 			/*
4351 			 * unmask link down interrupt and mask link-up
4352 			 * intr
4353 			 */
4354 			val64 = readq(&bar0->gpio_int_mask);
4355 			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4356 			val64 |= GPIO_INT_MASK_LINK_UP;
4357 			writeq(val64, &bar0->gpio_int_mask);
4358 
4359 		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4360 			val64 = readq(&bar0->adapter_status);
4361 			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask the link up interrupt */
4363 			val64 = readq(&bar0->gpio_int_mask);
4364 			val64 &= ~GPIO_INT_MASK_LINK_UP;
4365 			val64 |= GPIO_INT_MASK_LINK_DOWN;
4366 			writeq(val64, &bar0->gpio_int_mask);
4367 
4368 			/* turn off LED */
4369 			val64 = readq(&bar0->adapter_control);
4370 			val64 = val64 & (~ADAPTER_LED_ON);
4371 			writeq(val64, &bar0->adapter_control);
4372 		}
4373 	}
4374 	val64 = readq(&bar0->gpio_int_mask);
4375 }
4376 
4377 /**
 *  do_s2io_chk_alarm_bit - Check for alarm and increment the counter
 *  @value: alarm bits to test for
 *  @addr: address of the alarm register
 *  @cnt: counter to increment when an alarm bit is set
 *  Description: Check for alarm and increment the counter
4383  *  Return Value:
4384  *  1 - if alarm bit set
4385  *  0 - if alarm bit is not set
4386  */
4387 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4388 				 unsigned long long *cnt)
4389 {
4390 	u64 val64;
4391 	val64 = readq(addr);
4392 	if (val64 & value) {
4393 		writeq(val64, addr);
4394 		(*cnt)++;
4395 		return 1;
4396 	}
	return 0;
}
4400 
4401 /**
4402  *  s2io_handle_errors - Xframe error indication handler
4403  *  @nic: device private variable
4404  *  Description: Handle alarms such as loss of link, single or
4405  *  double ECC errors, critical and serious errors.
4406  *  Return Value:
4407  *  NONE
4408  */
4409 static void s2io_handle_errors(void *dev_id)
4410 {
4411 	struct net_device *dev = (struct net_device *)dev_id;
4412 	struct s2io_nic *sp = netdev_priv(dev);
4413 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4414 	u64 temp64 = 0, val64 = 0;
4415 	int i = 0;
4416 
4417 	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4418 	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4419 
4420 	if (!is_s2io_card_up(sp))
4421 		return;
4422 
4423 	if (pci_channel_offline(sp->pdev))
4424 		return;
4425 
4426 	memset(&sw_stat->ring_full_cnt, 0,
4427 	       sizeof(sw_stat->ring_full_cnt));
4428 
4429 	/* Handling the XPAK counters update */
4430 	if (stats->xpak_timer_count < 72000) {
4431 		/* waiting for an hour */
4432 		stats->xpak_timer_count++;
4433 	} else {
4434 		s2io_updt_xpak_counter(dev);
4435 		/* reset the count to zero */
4436 		stats->xpak_timer_count = 0;
4437 	}
4438 
4439 	/* Handling link status change error Intr */
4440 	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4441 		val64 = readq(&bar0->mac_rmac_err_reg);
4442 		writeq(val64, &bar0->mac_rmac_err_reg);
4443 		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4444 			schedule_work(&sp->set_link_task);
4445 	}
4446 
4447 	/* In case of a serious error, the device will be Reset. */
4448 	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4449 				  &sw_stat->serious_err_cnt))
4450 		goto reset;
4451 
4452 	/* Check for data parity error */
4453 	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4454 				  &sw_stat->parity_err_cnt))
4455 		goto reset;
4456 
	/*
	 * Check the ring full counters; each 64-bit bump counter packs
	 * four 16-bit per-ring counts, most significant field first.
	 */
4458 	if (sp->device_type == XFRAME_II_DEVICE) {
4459 		val64 = readq(&bar0->ring_bump_counter1);
4460 		for (i = 0; i < 4; i++) {
4461 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4462 			temp64 >>= 64 - ((i+1)*16);
4463 			sw_stat->ring_full_cnt[i] += temp64;
4464 		}
4465 
4466 		val64 = readq(&bar0->ring_bump_counter2);
4467 		for (i = 0; i < 4; i++) {
4468 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4469 			temp64 >>= 64 - ((i+1)*16);
4470 			sw_stat->ring_full_cnt[i+4] += temp64;
4471 		}
4472 	}
4473 
4474 	val64 = readq(&bar0->txdma_int_status);
4475 	/*check for pfc_err*/
4476 	if (val64 & TXDMA_PFC_INT) {
4477 		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4478 					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4479 					  PFC_PCIX_ERR,
4480 					  &bar0->pfc_err_reg,
4481 					  &sw_stat->pfc_err_cnt))
4482 			goto reset;
4483 		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4484 				      &bar0->pfc_err_reg,
4485 				      &sw_stat->pfc_err_cnt);
4486 	}
4487 
4488 	/*check for tda_err*/
4489 	if (val64 & TXDMA_TDA_INT) {
4490 		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4491 					  TDA_SM0_ERR_ALARM |
4492 					  TDA_SM1_ERR_ALARM,
4493 					  &bar0->tda_err_reg,
4494 					  &sw_stat->tda_err_cnt))
4495 			goto reset;
4496 		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4497 				      &bar0->tda_err_reg,
4498 				      &sw_stat->tda_err_cnt);
4499 	}
4500 	/*check for pcc_err*/
4501 	if (val64 & TXDMA_PCC_INT) {
4502 		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4503 					  PCC_N_SERR | PCC_6_COF_OV_ERR |
4504 					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4505 					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4506 					  PCC_TXB_ECC_DB_ERR,
4507 					  &bar0->pcc_err_reg,
4508 					  &sw_stat->pcc_err_cnt))
4509 			goto reset;
4510 		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4511 				      &bar0->pcc_err_reg,
4512 				      &sw_stat->pcc_err_cnt);
4513 	}
4514 
4515 	/*check for tti_err*/
4516 	if (val64 & TXDMA_TTI_INT) {
4517 		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4518 					  &bar0->tti_err_reg,
4519 					  &sw_stat->tti_err_cnt))
4520 			goto reset;
4521 		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4522 				      &bar0->tti_err_reg,
4523 				      &sw_stat->tti_err_cnt);
4524 	}
4525 
4526 	/*check for lso_err*/
4527 	if (val64 & TXDMA_LSO_INT) {
4528 		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4529 					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4530 					  &bar0->lso_err_reg,
4531 					  &sw_stat->lso_err_cnt))
4532 			goto reset;
4533 		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4534 				      &bar0->lso_err_reg,
4535 				      &sw_stat->lso_err_cnt);
4536 	}
4537 
4538 	/*check for tpa_err*/
4539 	if (val64 & TXDMA_TPA_INT) {
4540 		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4541 					  &bar0->tpa_err_reg,
4542 					  &sw_stat->tpa_err_cnt))
4543 			goto reset;
4544 		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4545 				      &bar0->tpa_err_reg,
4546 				      &sw_stat->tpa_err_cnt);
4547 	}
4548 
4549 	/*check for sm_err*/
4550 	if (val64 & TXDMA_SM_INT) {
4551 		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4552 					  &bar0->sm_err_reg,
4553 					  &sw_stat->sm_err_cnt))
4554 			goto reset;
4555 	}
4556 
4557 	val64 = readq(&bar0->mac_int_status);
4558 	if (val64 & MAC_INT_STATUS_TMAC_INT) {
4559 		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4560 					  &bar0->mac_tmac_err_reg,
4561 					  &sw_stat->mac_tmac_err_cnt))
4562 			goto reset;
4563 		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4564 				      TMAC_DESC_ECC_SG_ERR |
4565 				      TMAC_DESC_ECC_DB_ERR,
4566 				      &bar0->mac_tmac_err_reg,
4567 				      &sw_stat->mac_tmac_err_cnt);
4568 	}
4569 
4570 	val64 = readq(&bar0->xgxs_int_status);
4571 	if (val64 & XGXS_INT_STATUS_TXGXS) {
4572 		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4573 					  &bar0->xgxs_txgxs_err_reg,
4574 					  &sw_stat->xgxs_txgxs_err_cnt))
4575 			goto reset;
4576 		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4577 				      &bar0->xgxs_txgxs_err_reg,
4578 				      &sw_stat->xgxs_txgxs_err_cnt);
4579 	}
4580 
4581 	val64 = readq(&bar0->rxdma_int_status);
4582 	if (val64 & RXDMA_INT_RC_INT_M) {
4583 		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4584 					  RC_FTC_ECC_DB_ERR |
4585 					  RC_PRCn_SM_ERR_ALARM |
4586 					  RC_FTC_SM_ERR_ALARM,
4587 					  &bar0->rc_err_reg,
4588 					  &sw_stat->rc_err_cnt))
4589 			goto reset;
4590 		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4591 				      RC_FTC_ECC_SG_ERR |
4592 				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4593 				      &sw_stat->rc_err_cnt);
4594 		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4595 					  PRC_PCI_AB_WR_Rn |
4596 					  PRC_PCI_AB_F_WR_Rn,
4597 					  &bar0->prc_pcix_err_reg,
4598 					  &sw_stat->prc_pcix_err_cnt))
4599 			goto reset;
4600 		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4601 				      PRC_PCI_DP_WR_Rn |
4602 				      PRC_PCI_DP_F_WR_Rn,
4603 				      &bar0->prc_pcix_err_reg,
4604 				      &sw_stat->prc_pcix_err_cnt);
4605 	}
4606 
4607 	if (val64 & RXDMA_INT_RPA_INT_M) {
4608 		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4609 					  &bar0->rpa_err_reg,
4610 					  &sw_stat->rpa_err_cnt))
4611 			goto reset;
4612 		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4613 				      &bar0->rpa_err_reg,
4614 				      &sw_stat->rpa_err_cnt);
4615 	}
4616 
4617 	if (val64 & RXDMA_INT_RDA_INT_M) {
4618 		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4619 					  RDA_FRM_ECC_DB_N_AERR |
4620 					  RDA_SM1_ERR_ALARM |
4621 					  RDA_SM0_ERR_ALARM |
4622 					  RDA_RXD_ECC_DB_SERR,
4623 					  &bar0->rda_err_reg,
4624 					  &sw_stat->rda_err_cnt))
4625 			goto reset;
4626 		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4627 				      RDA_FRM_ECC_SG_ERR |
4628 				      RDA_MISC_ERR |
4629 				      RDA_PCIX_ERR,
4630 				      &bar0->rda_err_reg,
4631 				      &sw_stat->rda_err_cnt);
4632 	}
4633 
4634 	if (val64 & RXDMA_INT_RTI_INT_M) {
4635 		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4636 					  &bar0->rti_err_reg,
4637 					  &sw_stat->rti_err_cnt))
4638 			goto reset;
4639 		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4640 				      &bar0->rti_err_reg,
4641 				      &sw_stat->rti_err_cnt);
4642 	}
4643 
4644 	val64 = readq(&bar0->mac_int_status);
4645 	if (val64 & MAC_INT_STATUS_RMAC_INT) {
4646 		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4647 					  &bar0->mac_rmac_err_reg,
4648 					  &sw_stat->mac_rmac_err_cnt))
4649 			goto reset;
4650 		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4651 				      RMAC_SINGLE_ECC_ERR |
4652 				      RMAC_DOUBLE_ECC_ERR,
4653 				      &bar0->mac_rmac_err_reg,
4654 				      &sw_stat->mac_rmac_err_cnt);
4655 	}
4656 
4657 	val64 = readq(&bar0->xgxs_int_status);
4658 	if (val64 & XGXS_INT_STATUS_RXGXS) {
4659 		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4660 					  &bar0->xgxs_rxgxs_err_reg,
4661 					  &sw_stat->xgxs_rxgxs_err_cnt))
4662 			goto reset;
4663 	}
4664 
4665 	val64 = readq(&bar0->mc_int_status);
4666 	if (val64 & MC_INT_STATUS_MC_INT) {
4667 		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4668 					  &bar0->mc_err_reg,
4669 					  &sw_stat->mc_err_cnt))
4670 			goto reset;
4671 
4672 		/* Handling Ecc errors */
4673 		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4674 			writeq(val64, &bar0->mc_err_reg);
4675 			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4676 				sw_stat->double_ecc_errs++;
4677 				if (sp->device_type != XFRAME_II_DEVICE) {
4678 					/*
4679 					 * Reset XframeI only if critical error
4680 					 */
4681 					if (val64 &
4682 					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4683 					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4684 						goto reset;
4685 				}
4686 			} else
4687 				sw_stat->single_ecc_errs++;
4688 		}
4689 	}
4690 	return;
4691 
4692 reset:
4693 	s2io_stop_all_tx_queue(sp);
4694 	schedule_work(&sp->rst_timer_task);
4695 	sw_stat->soft_reset_cnt++;
4696 }
4697 
4698 /**
4699  *  s2io_isr - ISR handler of the device .
4700  *  @irq: the irq of the device.
4701  *  @dev_id: a void pointer to the dev structure of the NIC.
4702  *  Description:  This function is the ISR handler of the device. It
4703  *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
4705  *  recv buffers, if their numbers are below the panic value which is
4706  *  presently set to 25% of the original number of rcv buffers allocated.
4707  *  Return value:
4708  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4709  *   IRQ_NONE: will be returned if interrupt is not from our device
4710  */
4711 static irqreturn_t s2io_isr(int irq, void *dev_id)
4712 {
4713 	struct net_device *dev = (struct net_device *)dev_id;
4714 	struct s2io_nic *sp = netdev_priv(dev);
4715 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4716 	int i;
4717 	u64 reason = 0;
4718 	struct mac_info *mac_control;
4719 	struct config_param *config;
4720 
4721 	/* Pretend we handled any irq's from a disconnected card */
4722 	if (pci_channel_offline(sp->pdev))
4723 		return IRQ_NONE;
4724 
4725 	if (!is_s2io_card_up(sp))
4726 		return IRQ_NONE;
4727 
4728 	config = &sp->config;
4729 	mac_control = &sp->mac_control;
4730 
4731 	/*
4732 	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be:
4734 	 * 1. Rx of packet.
4735 	 * 2. Tx complete.
4736 	 * 3. Link down.
4737 	 */
4738 	reason = readq(&bar0->general_int_status);
4739 
4740 	if (unlikely(reason == S2IO_MINUS_ONE))
4741 		return IRQ_HANDLED;	/* Nothing much can be done. Get out */
4742 
4743 	if (reason &
4744 	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4745 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4746 
4747 		if (config->napi) {
4748 			if (reason & GEN_INTR_RXTRAFFIC) {
4749 				napi_schedule(&sp->napi);
4750 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4751 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4752 				readl(&bar0->rx_traffic_int);
4753 			}
4754 		} else {
4755 			/*
4756 			 * rx_traffic_int reg is an R1 register, writing all 1's
4757 			 * will ensure that the actual interrupt causing bit
			 * gets cleared and hence a read can be avoided.
4759 			 */
4760 			if (reason & GEN_INTR_RXTRAFFIC)
4761 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4762 
4763 			for (i = 0; i < config->rx_ring_num; i++) {
4764 				struct ring_info *ring = &mac_control->rings[i];
4765 
4766 				rx_intr_handler(ring, 0);
4767 			}
4768 		}
4769 
4770 		/*
4771 		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit gets
		 * cleared and hence a read can be avoided.
4774 		 */
4775 		if (reason & GEN_INTR_TXTRAFFIC)
4776 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4777 
4778 		for (i = 0; i < config->tx_fifo_num; i++)
4779 			tx_intr_handler(&mac_control->fifos[i]);
4780 
4781 		if (reason & GEN_INTR_TXPIC)
4782 			s2io_txpic_intr_handle(sp);
4783 
4784 		/*
4785 		 * Reallocate the buffers from the interrupt handler itself.
4786 		 */
4787 		if (!config->napi) {
4788 			for (i = 0; i < config->rx_ring_num; i++) {
4789 				struct ring_info *ring = &mac_control->rings[i];
4790 
4791 				s2io_chk_rx_buffers(sp, ring);
4792 			}
4793 		}
4794 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4795 		readl(&bar0->general_int_status);
4796 
4797 		return IRQ_HANDLED;
4798 
4799 	} else if (!reason) {
4800 		/* The interrupt was not raised by us */
4801 		return IRQ_NONE;
4802 	}
4803 
4804 	return IRQ_HANDLED;
4805 }
4806 
/**
 *  s2io_updt_stats - Triggers an on-chip statistics update.
 *  @sp : private member of the device structure.
 *  Description: Requests a one-shot hardware statistics transfer and
 *  polls until it completes or times out.
 */
4810 static void s2io_updt_stats(struct s2io_nic *sp)
4811 {
4812 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4813 	u64 val64;
4814 	int cnt = 0;
4815 
4816 	if (is_s2io_card_up(sp)) {
4817 		/* Apprx 30us on a 133 MHz bus */
4818 		val64 = SET_UPDT_CLICKS(10) |
4819 			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4820 		writeq(val64, &bar0->stat_cfg);
4821 		do {
4822 			udelay(100);
4823 			val64 = readq(&bar0->stat_cfg);
4824 			if (!(val64 & s2BIT(0)))
4825 				break;
4826 			cnt++;
4827 			if (cnt == 5)
4828 				break; /* Updt failed */
4829 		} while (1);
4830 	}
4831 }
4832 
4833 /**
4834  *  s2io_get_stats - Updates the device statistics structure.
4835  *  @dev : pointer to the device structure.
4836  *  Description:
4837  *  This function updates the device statistics structure in the s2io_nic
4838  *  structure and returns a pointer to the same.
4839  *  Return value:
4840  *  pointer to the updated net_device_stats structure.
4841  */
4842 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4843 {
4844 	struct s2io_nic *sp = netdev_priv(dev);
4845 	struct mac_info *mac_control = &sp->mac_control;
4846 	struct stat_block *stats = mac_control->stats_info;
4847 	u64 delta;
4848 
4849 	/* Configure Stats for immediate updt */
4850 	s2io_updt_stats(sp);
4851 
4852 	/* A device reset will cause the on-adapter statistics to be zero'ed.
4853 	 * This can be done while running by changing the MTU.  To prevent the
4854 	 * system from having the stats zero'ed, the driver keeps a copy of the
4855 	 * last update to the system (which is also zero'ed on reset).  This
4856 	 * enables the driver to accurately know the delta between the last
4857 	 * update and the current update.
4858 	 */
4859 	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4860 		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4861 	sp->stats.rx_packets += delta;
4862 	dev->stats.rx_packets += delta;
4863 
4864 	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4865 		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4866 	sp->stats.tx_packets += delta;
4867 	dev->stats.tx_packets += delta;
4868 
4869 	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4870 		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4871 	sp->stats.rx_bytes += delta;
4872 	dev->stats.rx_bytes += delta;
4873 
4874 	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4875 		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4876 	sp->stats.tx_bytes += delta;
4877 	dev->stats.tx_bytes += delta;
4878 
4879 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4880 	sp->stats.rx_errors += delta;
4881 	dev->stats.rx_errors += delta;
4882 
4883 	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4884 		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4885 	sp->stats.tx_errors += delta;
4886 	dev->stats.tx_errors += delta;
4887 
4888 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4889 	sp->stats.rx_dropped += delta;
4890 	dev->stats.rx_dropped += delta;
4891 
4892 	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4893 	sp->stats.tx_dropped += delta;
4894 	dev->stats.tx_dropped += delta;
4895 
4896 	/* The adapter MAC interprets pause frames as multicast packets, but
4897 	 * does not pass them up.  This erroneously increases the multicast
4898 	 * packet count and needs to be deducted when the multicast frame count
4899 	 * is queried.
4900 	 */
4901 	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4902 		le32_to_cpu(stats->rmac_vld_mcst_frms);
4903 	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4904 	delta -= sp->stats.multicast;
4905 	sp->stats.multicast += delta;
4906 	dev->stats.multicast += delta;
4907 
4908 	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4909 		le32_to_cpu(stats->rmac_usized_frms)) +
4910 		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4911 	sp->stats.rx_length_errors += delta;
4912 	dev->stats.rx_length_errors += delta;
4913 
4914 	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4915 	sp->stats.rx_crc_errors += delta;
4916 	dev->stats.rx_crc_errors += delta;
4917 
4918 	return &dev->stats;
4919 }
4920 
4921 /**
4922  *  s2io_set_multicast - entry point for multicast address enable/disable.
4923  *  @dev : pointer to the device structure
4924  *  Description:
4925  *  This function is a driver entry point which gets called by the kernel
4926  *  whenever multicast addresses must be enabled/disabled. This also gets
 *  called to set/reset promiscuous mode. Depending on the device flags,
 *  we determine whether multicast addresses must be enabled or whether
 *  promiscuous mode is to be disabled, etc.
4930  *  Return value:
4931  *  void.
4932  */
4933 
4934 static void s2io_set_multicast(struct net_device *dev)
4935 {
4936 	int i, j, prev_cnt;
4937 	struct netdev_hw_addr *ha;
4938 	struct s2io_nic *sp = netdev_priv(dev);
4939 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL;
	u64 mask = 0xfeffffffffffULL;
4942 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4943 	void __iomem *add;
4944 	struct config_param *config = &sp->config;
4945 
4946 	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4947 		/*  Enable all Multicast addresses */
4948 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4949 		       &bar0->rmac_addr_data0_mem);
4950 		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4951 		       &bar0->rmac_addr_data1_mem);
4952 		val64 = RMAC_ADDR_CMD_MEM_WE |
4953 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4954 			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4955 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4956 		/* Wait till command completes */
4957 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4958 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4959 				      S2IO_BIT_RESET);
4960 
4961 		sp->m_cast_flg = 1;
4962 		sp->all_multi_pos = config->max_mc_addr - 1;
4963 	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4964 		/*  Disable all Multicast addresses */
4965 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4966 		       &bar0->rmac_addr_data0_mem);
4967 		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4968 		       &bar0->rmac_addr_data1_mem);
4969 		val64 = RMAC_ADDR_CMD_MEM_WE |
4970 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4971 			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4972 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4973 		/* Wait till command completes */
4974 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4975 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4976 				      S2IO_BIT_RESET);
4977 
4978 		sp->m_cast_flg = 0;
4979 		sp->all_multi_pos = 0;
4980 	}
4981 
4982 	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4983 		/*  Put the NIC into promiscuous mode */
4984 		add = &bar0->mac_cfg;
4985 		val64 = readq(&bar0->mac_cfg);
4986 		val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4987 
4988 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4989 		writel((u32)val64, add);
4990 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4991 		writel((u32) (val64 >> 32), (add + 4));
4992 
4993 		if (vlan_tag_strip != 1) {
4994 			val64 = readq(&bar0->rx_pa_cfg);
4995 			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4996 			writeq(val64, &bar0->rx_pa_cfg);
4997 			sp->vlan_strip_flag = 0;
4998 		}
4999 
5000 		val64 = readq(&bar0->mac_cfg);
5001 		sp->promisc_flg = 1;
5002 		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5003 			  dev->name);
5004 	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5005 		/*  Remove the NIC from promiscuous mode */
5006 		add = &bar0->mac_cfg;
5007 		val64 = readq(&bar0->mac_cfg);
5008 		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5009 
5010 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5011 		writel((u32)val64, add);
5012 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5013 		writel((u32) (val64 >> 32), (add + 4));
5014 
5015 		if (vlan_tag_strip != 0) {
5016 			val64 = readq(&bar0->rx_pa_cfg);
5017 			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5018 			writeq(val64, &bar0->rx_pa_cfg);
5019 			sp->vlan_strip_flag = 1;
5020 		}
5021 
5022 		val64 = readq(&bar0->mac_cfg);
5023 		sp->promisc_flg = 0;
5024 		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
5025 	}
5026 
5027 	/*  Update individual M_CAST address list */
5028 	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
5029 		if (netdev_mc_count(dev) >
5030 		    (config->max_mc_addr - config->max_mac_addr)) {
5031 			DBG_PRINT(ERR_DBG,
5032 				  "%s: No more Rx filters can be added - "
5033 				  "please enable ALL_MULTI instead\n",
5034 				  dev->name);
5035 			return;
5036 		}
5037 
5038 		prev_cnt = sp->mc_addr_count;
5039 		sp->mc_addr_count = netdev_mc_count(dev);
5040 
5041 		/* Clear out the previous list of Mc in the H/W. */
5042 		for (i = 0; i < prev_cnt; i++) {
5043 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5044 			       &bar0->rmac_addr_data0_mem);
5045 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5046 			       &bar0->rmac_addr_data1_mem);
5047 			val64 = RMAC_ADDR_CMD_MEM_WE |
5048 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5049 				RMAC_ADDR_CMD_MEM_OFFSET
5050 				(config->mc_start_offset + i);
5051 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5052 
			/* Wait till command completes */
5054 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5055 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5056 						  S2IO_BIT_RESET)) {
5057 				DBG_PRINT(ERR_DBG,
5058 					  "%s: Adding Multicasts failed\n",
5059 					  dev->name);
5060 				return;
5061 			}
5062 		}
5063 
5064 		/* Create the new Rx filter list and update the same in H/W. */
5065 		i = 0;
5066 		netdev_for_each_mc_addr(ha, dev) {
5067 			mac_addr = 0;
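			/*
			 * Pack the 6-byte MAC into the low 48 bits of a u64,
			 * e.g. 00:01:02:03:04:05 -> 0x000102030405; the final
			 * right shift undoes the loop's extra <<= 8.
			 */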
5068 			for (j = 0; j < ETH_ALEN; j++) {
5069 				mac_addr |= ha->addr[j];
5070 				mac_addr <<= 8;
5071 			}
5072 			mac_addr >>= 8;
5073 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5074 			       &bar0->rmac_addr_data0_mem);
5075 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5076 			       &bar0->rmac_addr_data1_mem);
5077 			val64 = RMAC_ADDR_CMD_MEM_WE |
5078 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5079 				RMAC_ADDR_CMD_MEM_OFFSET
5080 				(i + config->mc_start_offset);
5081 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5082 
			/* Wait till command completes */
5084 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5085 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5086 						  S2IO_BIT_RESET)) {
5087 				DBG_PRINT(ERR_DBG,
5088 					  "%s: Adding Multicasts failed\n",
5089 					  dev->name);
5090 				return;
5091 			}
5092 			i++;
5093 		}
5094 	}
5095 }
5096 
/* read the unicast & multicast addresses from CAM and store them in
 * the def_mac_addr structure
 */
5100 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5101 {
5102 	int offset;
5103 	u64 mac_addr = 0x0;
5104 	struct config_param *config = &sp->config;
5105 
5106 	/* store unicast & multicast mac addresses */
5107 	for (offset = 0; offset < config->max_mc_addr; offset++) {
5108 		mac_addr = do_s2io_read_unicast_mc(sp, offset);
5109 		/* if read fails disable the entry */
5110 		if (mac_addr == FAILURE)
5111 			mac_addr = S2IO_DISABLE_MAC_ENTRY;
5112 		do_s2io_copy_mac_addr(sp, offset, mac_addr);
5113 	}
5114 }
5115 
5116 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5117 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5118 {
5119 	int offset;
5120 	struct config_param *config = &sp->config;
5121 	/* restore unicast mac address */
5122 	for (offset = 0; offset < config->max_mac_addr; offset++)
5123 		do_s2io_prog_unicast(sp->dev,
5124 				     sp->def_mac_addr[offset].mac_addr);
5125 
5126 	/* restore multicast mac address */
5127 	for (offset = config->mc_start_offset;
5128 	     offset < config->max_mc_addr; offset++)
5129 		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5130 }
5131 
5132 /* add a multicast MAC address to CAM */
5133 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5134 {
5135 	int i;
5136 	u64 mac_addr = 0;
5137 	struct config_param *config = &sp->config;
5138 
5139 	for (i = 0; i < ETH_ALEN; i++) {
5140 		mac_addr <<= 8;
5141 		mac_addr |= addr[i];
5142 	}
5143 	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5144 		return SUCCESS;
5145 
	/* check if the multicast MAC is already present in the CAM */
5147 	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5148 		u64 tmp64;
5149 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5150 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5151 			break;
5152 
5153 		if (tmp64 == mac_addr)
5154 			return SUCCESS;
5155 	}
5156 	if (i == config->max_mc_addr) {
5157 		DBG_PRINT(ERR_DBG,
5158 			  "CAM full no space left for multicast MAC\n");
5159 		return FAILURE;
5160 	}
5161 	/* Update the internal structure with this new mac address */
5162 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5163 
5164 	return do_s2io_add_mac(sp, mac_addr, i);
5165 }
5166 
5167 /* add MAC address to CAM */
5168 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5169 {
5170 	u64 val64;
5171 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5172 
5173 	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5174 	       &bar0->rmac_addr_data0_mem);
5175 
5176 	val64 =	RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5177 		RMAC_ADDR_CMD_MEM_OFFSET(off);
5178 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5179 
5180 	/* Wait till command completes */
5181 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5182 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5183 				  S2IO_BIT_RESET)) {
5184 		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5185 		return FAILURE;
5186 	}
5187 	return SUCCESS;
5188 }

/* Delete a specified unicast/multicast MAC entry from the CAM */
5190 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5191 {
5192 	int offset;
5193 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5194 	struct config_param *config = &sp->config;
5195 
5196 	for (offset = 1;
5197 	     offset < config->max_mc_addr; offset++) {
5198 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
5199 		if (tmp64 == addr) {
			/* disable the entry by writing 0xffffffffffffULL */
5201 			if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5202 				return FAILURE;
			/* refresh the stored MAC list from the CAM */
5204 			do_s2io_store_unicast_mc(sp);
5205 			return SUCCESS;
5206 		}
5207 	}
5208 	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5209 		  (unsigned long long)addr);
5210 	return FAILURE;
5211 }
5212 
5213 /* read mac entries from CAM */
5214 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5215 {
5216 	u64 tmp64 = 0xffffffffffff0000ULL, val64;
5217 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5218 
5219 	/* read mac addr */
5220 	val64 =	RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5221 		RMAC_ADDR_CMD_MEM_OFFSET(offset);
5222 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5223 
5224 	/* Wait till command completes */
5225 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5226 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5227 				  S2IO_BIT_RESET)) {
5228 		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5229 		return FAILURE;
5230 	}
5231 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
5232 
5233 	return tmp64 >> 16;
5234 }
5235 
5236 /**
5237  * s2io_set_mac_addr - driver entry point
5238  */
5239 
5240 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5241 {
5242 	struct sockaddr *addr = p;
5243 
5244 	if (!is_valid_ether_addr(addr->sa_data))
5245 		return -EADDRNOTAVAIL;
5246 
5247 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5248 
5249 	/* store the MAC address in CAM */
5250 	return do_s2io_prog_unicast(dev, dev->dev_addr);
5251 }
5252 /**
5253  *  do_s2io_prog_unicast - Programs the Xframe mac address
5254  *  @dev : pointer to the device structure.
5255  *  @addr: a uchar pointer to the new mac address which is to be set.
5256  *  Description : This procedure will program the Xframe to receive
5257  *  frames with new Mac Address
5258  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5259  *  as defined in errno.h file on failure.
5260  */
5261 
5262 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5263 {
5264 	struct s2io_nic *sp = netdev_priv(dev);
5265 	register u64 mac_addr = 0, perm_addr = 0;
5266 	int i;
5267 	u64 tmp64;
5268 	struct config_param *config = &sp->config;
5269 
5270 	/*
5271 	 * Set the new MAC address as the new unicast filter and reflect this
5272 	 * change on the device address registered with the OS. It will be
5273 	 * at offset 0.
5274 	 */
5275 	for (i = 0; i < ETH_ALEN; i++) {
5276 		mac_addr <<= 8;
5277 		mac_addr |= addr[i];
5278 		perm_addr <<= 8;
5279 		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5280 	}
5281 
	/* check if the dev_addr is different from perm_addr */
5283 	if (mac_addr == perm_addr)
5284 		return SUCCESS;
5285 
	/* check if the MAC is already present in the CAM */
5287 	for (i = 1; i < config->max_mac_addr; i++) {
5288 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5289 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5290 			break;
5291 
5292 		if (tmp64 == mac_addr) {
5293 			DBG_PRINT(INFO_DBG,
5294 				  "MAC addr:0x%llx already present in CAM\n",
5295 				  (unsigned long long)mac_addr);
5296 			return SUCCESS;
5297 		}
5298 	}
5299 	if (i == config->max_mac_addr) {
5300 		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5301 		return FAILURE;
5302 	}
5303 	/* Update the internal structure with this new mac address */
5304 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5305 
5306 	return do_s2io_add_mac(sp, mac_addr, i);
5307 }
5308 
5309 /**
5310  * s2io_ethtool_sset - Sets different link parameters.
5311  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
5312  * @info: pointer to the structure with parameters given by ethtool to set
5313  * link information.
5314  * Description:
5315  * The function sets different link parameters provided by the user onto
5316  * the NIC.
5317  * Return value:
5318  * 0 on success.
5319  */
5320 
5321 static int s2io_ethtool_sset(struct net_device *dev,
5322 			     struct ethtool_cmd *info)
5323 {
	struct s2io_nic *sp = netdev_priv(dev);

	if ((info->autoneg == AUTONEG_ENABLE) ||
	    (ethtool_cmd_speed(info) != SPEED_10000) ||
	    (info->duplex != DUPLEX_FULL))
		return -EINVAL;

	s2io_close(sp->dev);
	s2io_open(sp->dev);
5333 
5334 	return 0;
5335 }
5336 
5337 /**
5338  * s2io_ethtol_gset - Return link specific information.
5339  * @sp : private member of the device structure, pointer to the
5340  *      s2io_nic structure.
5341  * @info : pointer to the structure with parameters given by ethtool
5342  * to return link information.
5343  * Description:
5344  * Returns link specific information like speed, duplex etc.. to ethtool.
5345  * Return value :
5346  * return 0 on success.
5347  */
5348 
5349 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5350 {
5351 	struct s2io_nic *sp = netdev_priv(dev);
5352 	info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
5354 	info->port = PORT_FIBRE;
5355 
	info->transceiver = XCVR_EXTERNAL;
5358 
5359 	if (netif_carrier_ok(sp->dev)) {
5360 		ethtool_cmd_speed_set(info, SPEED_10000);
5361 		info->duplex = DUPLEX_FULL;
5362 	} else {
5363 		ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
5364 		info->duplex = DUPLEX_UNKNOWN;
5365 	}
5366 
5367 	info->autoneg = AUTONEG_DISABLE;
5368 	return 0;
5369 }
5370 
5371 /**
5372  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5373  * @sp : private member of the device structure, which is a pointer to the
5374  * s2io_nic structure.
5375  * @info : pointer to the structure with parameters given by ethtool to
5376  * return driver information.
5377  * Description:
5378  * Returns driver specefic information like name, version etc.. to ethtool.
5379  * Return value:
5380  *  void
5381  */
5382 
5383 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5384 				  struct ethtool_drvinfo *info)
5385 {
5386 	struct s2io_nic *sp = netdev_priv(dev);
5387 
5388 	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5389 	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
5390 	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5391 	info->regdump_len = XENA_REG_SPACE;
5392 	info->eedump_len = XENA_EEPROM_SPACE;
5393 }
5394 
5395 /**
5396  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5397  *  @sp: private member of the device structure, which is a pointer to the
5398  *  s2io_nic structure.
5399  *  @regs : pointer to the structure with parameters given by ethtool for
5400  *  dumping the registers.
5401  *  @reg_space: The input argumnet into which all the registers are dumped.
5402  *  Description:
5403  *  Dumps the entire register space of xFrame NIC into the user given
5404  *  buffer area.
5405  * Return value :
5406  * void .
5407  */
5408 
5409 static void s2io_ethtool_gregs(struct net_device *dev,
5410 			       struct ethtool_regs *regs, void *space)
5411 {
5412 	int i;
5413 	u64 reg;
5414 	u8 *reg_space = (u8 *)space;
5415 	struct s2io_nic *sp = netdev_priv(dev);
5416 
5417 	regs->len = XENA_REG_SPACE;
5418 	regs->version = sp->pdev->subsystem_device;
5419 
5420 	for (i = 0; i < regs->len; i += 8) {
5421 		reg = readq(sp->bar0 + i);
5422 		memcpy((reg_space + i), &reg, 8);
5423 	}
5424 }
5425 
5426 /*
5427  *  s2io_set_led - control NIC led
5428  */
5429 static void s2io_set_led(struct s2io_nic *sp, bool on)
5430 {
5431 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5432 	u16 subid = sp->pdev->subsystem_device;
5433 	u64 val64;
5434 
5435 	if ((sp->device_type == XFRAME_II_DEVICE) ||
5436 	    ((subid & 0xFF) >= 0x07)) {
5437 		val64 = readq(&bar0->gpio_control);
5438 		if (on)
5439 			val64 |= GPIO_CTRL_GPIO_0;
5440 		else
5441 			val64 &= ~GPIO_CTRL_GPIO_0;
5442 
5443 		writeq(val64, &bar0->gpio_control);
5444 	} else {
5445 		val64 = readq(&bar0->adapter_control);
5446 		if (on)
5447 			val64 |= ADAPTER_LED_ON;
5448 		else
5449 			val64 &= ~ADAPTER_LED_ON;
5450 
5451 		writeq(val64, &bar0->adapter_control);
5452 	}
5453 
5454 }
5455 
5456 /**
5457  * s2io_ethtool_set_led - To physically identify the nic on the system.
5458  * @dev : network device
5459  * @state: led setting
5460  *
5461  * Description: Used to physically identify the NIC on the system.
5462  * The Link LED will blink for a time specified by the user for
5463  * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if its link is up.
5466  */
5467 
5468 static int s2io_ethtool_set_led(struct net_device *dev,
5469 				enum ethtool_phys_id_state state)
5470 {
5471 	struct s2io_nic *sp = netdev_priv(dev);
5472 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5473 	u16 subid = sp->pdev->subsystem_device;
5474 
5475 	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5476 		u64 val64 = readq(&bar0->adapter_control);
5477 		if (!(val64 & ADAPTER_CNTL_EN)) {
5478 			pr_err("Adapter Link down, cannot blink LED\n");
5479 			return -EAGAIN;
5480 		}
5481 	}
5482 
5483 	switch (state) {
5484 	case ETHTOOL_ID_ACTIVE:
5485 		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5486 		return 1;	/* cycle on/off once per second */
5487 
5488 	case ETHTOOL_ID_ON:
5489 		s2io_set_led(sp, true);
5490 		break;
5491 
5492 	case ETHTOOL_ID_OFF:
5493 		s2io_set_led(sp, false);
5494 		break;
5495 
5496 	case ETHTOOL_ID_INACTIVE:
5497 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5498 			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5499 	}
5500 
5501 	return 0;
5502 }
5503 
5504 static void s2io_ethtool_gringparam(struct net_device *dev,
5505 				    struct ethtool_ringparam *ering)
5506 {
5507 	struct s2io_nic *sp = netdev_priv(dev);
5508 	int i, tx_desc_count = 0, rx_desc_count = 0;
5509 
5510 	if (sp->rxd_mode == RXD_MODE_1) {
5511 		ering->rx_max_pending = MAX_RX_DESC_1;
5512 		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5513 	} else {
5514 		ering->rx_max_pending = MAX_RX_DESC_2;
5515 		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5516 	}
5517 
5518 	ering->tx_max_pending = MAX_TX_DESC;
5519 
5520 	for (i = 0; i < sp->config.rx_ring_num; i++)
5521 		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5522 	ering->rx_pending = rx_desc_count;
5523 	ering->rx_jumbo_pending = rx_desc_count;
5524 
5525 	for (i = 0; i < sp->config.tx_fifo_num; i++)
5526 		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5527 	ering->tx_pending = tx_desc_count;
5528 	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5529 }
5530 
5531 /**
5532  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5533  * @sp : private member of the device structure, which is a pointer to the
5534  *	s2io_nic structure.
5535  * @ep : pointer to the structure with pause parameters given by ethtool.
5536  * Description:
5537  * Returns the Pause frame generation and reception capability of the NIC.
5538  * Return value:
5539  *  void
5540  */
5541 static void s2io_ethtool_getpause_data(struct net_device *dev,
5542 				       struct ethtool_pauseparam *ep)
5543 {
5544 	u64 val64;
5545 	struct s2io_nic *sp = netdev_priv(dev);
5546 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5547 
5548 	val64 = readq(&bar0->rmac_pause_cfg);
5549 	if (val64 & RMAC_PAUSE_GEN_ENABLE)
5550 		ep->tx_pause = true;
5551 	if (val64 & RMAC_PAUSE_RX_ENABLE)
5552 		ep->rx_pause = true;
5553 	ep->autoneg = false;
5554 }
5555 
5556 /**
5557  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5558  * @sp : private member of the device structure, which is a pointer to the
5559  *      s2io_nic structure.
5560  * @ep : pointer to the structure with pause parameters given by ethtool.
5561  * Description:
5562  * It can be used to set or reset Pause frame generation or reception
5563  * support of the NIC.
5564  * Return value:
5565  * int, returns 0 on Success
5566  */
5567 
5568 static int s2io_ethtool_setpause_data(struct net_device *dev,
5569 				      struct ethtool_pauseparam *ep)
5570 {
5571 	u64 val64;
5572 	struct s2io_nic *sp = netdev_priv(dev);
5573 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5574 
5575 	val64 = readq(&bar0->rmac_pause_cfg);
5576 	if (ep->tx_pause)
5577 		val64 |= RMAC_PAUSE_GEN_ENABLE;
5578 	else
5579 		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5580 	if (ep->rx_pause)
5581 		val64 |= RMAC_PAUSE_RX_ENABLE;
5582 	else
5583 		val64 &= ~RMAC_PAUSE_RX_ENABLE;
5584 	writeq(val64, &bar0->rmac_pause_cfg);
5585 	return 0;
5586 }
5587 
5588 /**
5589  * read_eeprom - reads 4 bytes of data from user given offset.
5590  * @sp : private member of the device structure, which is a pointer to the
5591  *      s2io_nic structure.
5592  * @off : offset at which the data must be written
5593  * @data : Its an output parameter where the data read at the given
5594  *	offset is stored.
5595  * Description:
5596  * Will read 4 bytes of data from the user given offset and return the
5597  * read data.
5598  * NOTE: Will allow to read only part of the EEPROM visible through the
5599  *   I2C bus.
5600  * Return value:
5601  *  -1 on failure and 0 on success.
5602  */
5603 
5604 #define S2IO_DEV_ID		5
5605 static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5606 {
5607 	int ret = -1;
5608 	u32 exit_cnt = 0;
5609 	u64 val64;
5610 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5611 
5612 	if (sp->device_type == XFRAME_I_DEVICE) {
5613 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5614 			I2C_CONTROL_ADDR(off) |
5615 			I2C_CONTROL_BYTE_CNT(0x3) |
5616 			I2C_CONTROL_READ |
5617 			I2C_CONTROL_CNTL_START;
5618 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5619 
5620 		while (exit_cnt < 5) {
5621 			val64 = readq(&bar0->i2c_control);
5622 			if (I2C_CONTROL_CNTL_END(val64)) {
5623 				*data = I2C_CONTROL_GET_DATA(val64);
5624 				ret = 0;
5625 				break;
5626 			}
5627 			msleep(50);
5628 			exit_cnt++;
5629 		}
5630 	}
5631 
5632 	if (sp->device_type == XFRAME_II_DEVICE) {
5633 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5634 			SPI_CONTROL_BYTECNT(0x3) |
5635 			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5636 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5637 		val64 |= SPI_CONTROL_REQ;
5638 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5639 		while (exit_cnt < 5) {
5640 			val64 = readq(&bar0->spi_control);
5641 			if (val64 & SPI_CONTROL_NACK) {
5642 				ret = 1;
5643 				break;
5644 			} else if (val64 & SPI_CONTROL_DONE) {
5645 				*data = readq(&bar0->spi_data);
5646 				*data &= 0xffffff;
5647 				ret = 0;
5648 				break;
5649 			}
5650 			msleep(50);
5651 			exit_cnt++;
5652 		}
5653 	}
5654 	return ret;
5655 }
5656 
5657 /**
5658  *  write_eeprom - actually writes the relevant part of the data value.
5659  *  @sp : private member of the device structure, which is a pointer to the
5660  *       s2io_nic structure.
5661  *  @off : offset at which the data must be written
5662  *  @data : The data that is to be written
5663  *  @cnt : Number of bytes of the data that are actually to be written into
5664  *  the Eeprom. (max of 3)
5665  * Description:
5666  *  Actually writes the relevant part of the data value into the Eeprom
5667  *  through the I2C bus.
5668  * Return value:
5669  *  0 on success, -1 on failure.
5670  */
5671 
5672 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5673 {
5674 	int exit_cnt = 0, ret = -1;
5675 	u64 val64;
5676 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5677 
5678 	if (sp->device_type == XFRAME_I_DEVICE) {
5679 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5680 			I2C_CONTROL_ADDR(off) |
5681 			I2C_CONTROL_BYTE_CNT(cnt) |
5682 			I2C_CONTROL_SET_DATA((u32)data) |
5683 			I2C_CONTROL_CNTL_START;
5684 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5685 
5686 		while (exit_cnt < 5) {
5687 			val64 = readq(&bar0->i2c_control);
5688 			if (I2C_CONTROL_CNTL_END(val64)) {
5689 				if (!(val64 & I2C_CONTROL_NACK))
5690 					ret = 0;
5691 				break;
5692 			}
5693 			msleep(50);
5694 			exit_cnt++;
5695 		}
5696 	}
5697 
5698 	if (sp->device_type == XFRAME_II_DEVICE) {
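		/*
		 * A byte count of 8 is encoded as 0 here, presumably
		 * because SPI_CONTROL_BYTECNT is a 3-bit field in which
		 * 0 denotes a full 8-byte transfer.
		 */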
5699 		int write_cnt = (cnt == 8) ? 0 : cnt;
5700 		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5701 
5702 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5703 			SPI_CONTROL_BYTECNT(write_cnt) |
5704 			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5705 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5706 		val64 |= SPI_CONTROL_REQ;
5707 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5708 		while (exit_cnt < 5) {
5709 			val64 = readq(&bar0->spi_control);
5710 			if (val64 & SPI_CONTROL_NACK) {
5711 				ret = 1;
5712 				break;
5713 			} else if (val64 & SPI_CONTROL_DONE) {
5714 				ret = 0;
5715 				break;
5716 			}
5717 			msleep(50);
5718 			exit_cnt++;
5719 		}
5720 	}
5721 	return ret;
5722 }

static void s2io_vpd_read(struct s2io_nic *nic)
5724 {
5725 	u8 *vpd_data;
5726 	u8 data;
5727 	int i = 0, cnt, len, fail = 0;
5728 	int vpd_addr = 0x80;
5729 	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5730 
5731 	if (nic->device_type == XFRAME_II_DEVICE) {
5732 		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5733 		vpd_addr = 0x80;
5734 	} else {
5735 		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5736 		vpd_addr = 0x50;
5737 	}
5738 	strcpy(nic->serial_num, "NOT AVAILABLE");
5739 
5740 	vpd_data = kmalloc(256, GFP_KERNEL);
5741 	if (!vpd_data) {
5742 		swstats->mem_alloc_fail_cnt++;
5743 		return;
5744 	}
5745 	swstats->mem_allocated += 256;
5746 
5747 	for (i = 0; i < 256; i += 4) {
5748 		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5749 		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5750 		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5751 		for (cnt = 0; cnt < 5; cnt++) {
5752 			msleep(2);
5753 			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5754 			if (data == 0x80)
5755 				break;
5756 		}
5757 		if (cnt >= 5) {
5758 			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5759 			fail = 1;
5760 			break;
5761 		}
5762 		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5763 				      (u32 *)&vpd_data[i]);
5764 	}
5765 
5766 	if (!fail) {
5767 		/* read serial number of adapter */
5768 		for (cnt = 0; cnt < 252; cnt++) {
5769 			if ((vpd_data[cnt] == 'S') &&
5770 			    (vpd_data[cnt+1] == 'N')) {
5771 				len = vpd_data[cnt+2];
5772 				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5773 					memcpy(nic->serial_num,
5774 					       &vpd_data[cnt + 3],
5775 					       len);
5776 					memset(nic->serial_num+len,
5777 					       0,
5778 					       VPD_STRING_LEN-len);
5779 					break;
5780 				}
5781 			}
5782 		}
5783 	}
5784 
5785 	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5786 		len = vpd_data[1];
5787 		memcpy(nic->product_name, &vpd_data[3], len);
5788 		nic->product_name[len] = 0;
5789 	}
5790 	kfree(vpd_data);
5791 	swstats->mem_freed += 256;
5792 }
5793 
5794 /**
5795  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5796  *  @sp : private member of the device structure, which is a pointer to the *       s2io_nic structure.
5797  *  @eeprom : pointer to the user level structure provided by ethtool,
5798  *  containing all relevant information.
5799  *  @data_buf : user defined value to be written into Eeprom.
5800  *  Description: Reads the values stored in the Eeprom at given offset
5801  *  for a given length. Stores these values int the input argument data
5802  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5803  *  Return value:
5804  *  int  0 on success
5805  */
5806 
5807 static int s2io_ethtool_geeprom(struct net_device *dev,
5808 				struct ethtool_eeprom *eeprom, u8 * data_buf)
5809 {
5810 	u32 i, valid;
5811 	u64 data;
5812 	struct s2io_nic *sp = netdev_priv(dev);
5813 
5814 	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5815 
5816 	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5817 		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5818 
5819 	for (i = 0; i < eeprom->len; i += 4) {
5820 		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5821 			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5822 			return -EFAULT;
5823 		}
5824 		valid = INV(data);
5825 		memcpy((data_buf + i), &valid, 4);
5826 	}
5827 	return 0;
5828 }
5829 
5830 /**
5831  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5832  *  @sp : private member of the device structure, which is a pointer to the
5833  *  s2io_nic structure.
5834  *  @eeprom : pointer to the user level structure provided by ethtool,
5835  *  containing all relevant information.
5836  *  @data_buf ; user defined value to be written into Eeprom.
5837  *  Description:
5838  *  Tries to write the user provided value in the Eeprom, at the offset
5839  *  given by the user.
5840  *  Return value:
5841  *  0 on success, -EFAULT on failure.
5842  */
5843 
5844 static int s2io_ethtool_seeprom(struct net_device *dev,
5845 				struct ethtool_eeprom *eeprom,
5846 				u8 *data_buf)
5847 {
5848 	int len = eeprom->len, cnt = 0;
5849 	u64 valid = 0, data;
5850 	struct s2io_nic *sp = netdev_priv(dev);
5851 
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: "
			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
			  eeprom->magic,
			  (sp->pdev->vendor | (sp->pdev->device << 16)));
5858 		return -EFAULT;
5859 	}
5860 
5861 	while (len) {
5862 		data = (u32)data_buf[cnt] & 0x000000FF;
5863 		if (data)
5864 			valid = (u32)(data << 24);
5865 		else
5866 			valid = data;
5867 
5868 		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5869 			DBG_PRINT(ERR_DBG,
5870 				  "ETHTOOL_WRITE_EEPROM Err: "
5871 				  "Cannot write into the specified offset\n");
5872 			return -EFAULT;
5873 		}
5874 		cnt++;
5875 		len--;
5876 	}
5877 
5878 	return 0;
5879 }
5880 
5881 /**
5882  * s2io_register_test - reads and writes into all clock domains.
5883  * @sp : private member of the device structure, which is a pointer to the
5884  * s2io_nic structure.
5885  * @data : variable that returns the result of each of the test conducted b
5886  * by the driver.
5887  * Description:
5888  * Read and write into all clock domains. The NIC has 3 clock domains,
5889  * see that registers in all the three regions are accessible.
5890  * Return value:
5891  * 0 on success.
5892  */
5893 
5894 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5895 {
5896 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5897 	u64 val64 = 0, exp_val;
5898 	int fail = 0;
5899 
5900 	val64 = readq(&bar0->pif_rd_swapper_fb);
5901 	if (val64 != 0x123456789abcdefULL) {
5902 		fail = 1;
5903 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5904 	}
5905 
5906 	val64 = readq(&bar0->rmac_pause_cfg);
5907 	if (val64 != 0xc000ffff00000000ULL) {
5908 		fail = 1;
5909 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5910 	}
5911 
5912 	val64 = readq(&bar0->rx_queue_cfg);
5913 	if (sp->device_type == XFRAME_II_DEVICE)
5914 		exp_val = 0x0404040404040404ULL;
5915 	else
5916 		exp_val = 0x0808080808080808ULL;
5917 	if (val64 != exp_val) {
5918 		fail = 1;
5919 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5920 	}
5921 
5922 	val64 = readq(&bar0->xgxs_efifo_cfg);
5923 	if (val64 != 0x000000001923141EULL) {
5924 		fail = 1;
5925 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5926 	}
5927 
5928 	val64 = 0x5A5A5A5A5A5A5A5AULL;
5929 	writeq(val64, &bar0->xmsi_data);
5930 	val64 = readq(&bar0->xmsi_data);
5931 	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5932 		fail = 1;
5933 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5934 	}
5935 
5936 	val64 = 0xA5A5A5A5A5A5A5A5ULL;
5937 	writeq(val64, &bar0->xmsi_data);
5938 	val64 = readq(&bar0->xmsi_data);
5939 	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5940 		fail = 1;
5941 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5942 	}
5943 
5944 	*data = fail;
5945 	return fail;
5946 }
5947 
5948 /**
5949  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5950  * @sp : private member of the device structure, which is a pointer to the
5951  * s2io_nic structure.
5952  * @data:variable that returns the result of each of the test conducted by
5953  * the driver.
5954  * Description:
5955  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5956  * register.
5957  * Return value:
5958  * 0 on success.
5959  */
5960 
5961 static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5962 {
5963 	int fail = 0;
5964 	u64 ret_data, org_4F0, org_7F0;
5965 	u8 saved_4F0 = 0, saved_7F0 = 0;
5966 	struct net_device *dev = sp->dev;
5967 
5968 	/* Test Write Error at offset 0 */
5969 	/* Note that SPI interface allows write access to all areas
5970 	 * of EEPROM. Hence doing all negative testing only for Xframe I.
5971 	 */
5972 	if (sp->device_type == XFRAME_I_DEVICE)
5973 		if (!write_eeprom(sp, 0, 0, 3))
5974 			fail = 1;
5975 
5976 	/* Save current values at offsets 0x4F0 and 0x7F0 */
5977 	if (!read_eeprom(sp, 0x4F0, &org_4F0))
5978 		saved_4F0 = 1;
5979 	if (!read_eeprom(sp, 0x7F0, &org_7F0))
5980 		saved_7F0 = 1;
5981 
5982 	/* Test Write at offset 4f0 */
5983 	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5984 		fail = 1;
5985 	if (read_eeprom(sp, 0x4F0, &ret_data))
5986 		fail = 1;
5987 
5988 	if (ret_data != 0x012345) {
5989 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5990 			  "Data written %llx Data read %llx\n",
5991 			  dev->name, (unsigned long long)0x12345,
5992 			  (unsigned long long)ret_data);
5993 		fail = 1;
5994 	}
5995 
	/* Reset the EEPROM data to 0xFFFFFF */
5997 	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5998 
5999 	/* Test Write Request Error at offset 0x7c */
6000 	if (sp->device_type == XFRAME_I_DEVICE)
6001 		if (!write_eeprom(sp, 0x07C, 0, 3))
6002 			fail = 1;
6003 
6004 	/* Test Write Request at offset 0x7f0 */
6005 	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6006 		fail = 1;
6007 	if (read_eeprom(sp, 0x7F0, &ret_data))
6008 		fail = 1;
6009 
6010 	if (ret_data != 0x012345) {
6011 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6012 			  "Data written %llx Data read %llx\n",
6013 			  dev->name, (unsigned long long)0x12345,
6014 			  (unsigned long long)ret_data);
6015 		fail = 1;
6016 	}
6017 
	/* Reset the EEPROM data to 0xFFFFFF */
6019 	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6020 
6021 	if (sp->device_type == XFRAME_I_DEVICE) {
6022 		/* Test Write Error at offset 0x80 */
6023 		if (!write_eeprom(sp, 0x080, 0, 3))
6024 			fail = 1;
6025 
6026 		/* Test Write Error at offset 0xfc */
6027 		if (!write_eeprom(sp, 0x0FC, 0, 3))
6028 			fail = 1;
6029 
6030 		/* Test Write Error at offset 0x100 */
6031 		if (!write_eeprom(sp, 0x100, 0, 3))
6032 			fail = 1;
6033 
6034 		/* Test Write Error at offset 4ec */
6035 		if (!write_eeprom(sp, 0x4EC, 0, 3))
6036 			fail = 1;
6037 	}
6038 
6039 	/* Restore values at offsets 0x4F0 and 0x7F0 */
6040 	if (saved_4F0)
6041 		write_eeprom(sp, 0x4F0, org_4F0, 3);
6042 	if (saved_7F0)
6043 		write_eeprom(sp, 0x7F0, org_7F0, 3);
6044 
6045 	*data = fail;
6046 	return fail;
6047 }
6048 
6049 /**
6050  * s2io_bist_test - invokes the MemBist test of the card .
6051  * @sp : private member of the device structure, which is a pointer to the
6052  * s2io_nic structure.
6053  * @data:variable that returns the result of each of the test conducted by
6054  * the driver.
6055  * Description:
6056  * This invokes the MemBist test of the card. We give around
6057  * 2 secs time for the Test to complete. If it's still not complete
6058  * within this peiod, we consider that the test failed.
6059  * Return value:
6060  * 0 on success and -1 on failure.
6061  */
6062 
6063 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6064 {
6065 	u8 bist = 0;
6066 	int cnt = 0, ret = -1;
6067 
	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
	bist |= PCI_BIST_START;
	/* PCI_BIST is a byte-wide register; write it back as a byte */
	pci_write_config_byte(sp->pdev, PCI_BIST, bist);
6071 
6072 	while (cnt < 20) {
6073 		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6074 		if (!(bist & PCI_BIST_START)) {
6075 			*data = (bist & PCI_BIST_CODE_MASK);
6076 			ret = 0;
6077 			break;
6078 		}
6079 		msleep(100);
6080 		cnt++;
6081 	}
6082 
6083 	return ret;
6084 }
6085 
6086 /**
6087  * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
6090  * @data: variable that returns the result of each of the test conducted by
6091  * the driver.
6092  * Description:
6093  * The function verifies the link state of the NIC and updates the input
6094  * argument 'data' appropriately.
6095  * Return value:
6096  * 0 on success.
6097  */
6098 
6099 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6100 {
6101 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6102 	u64 val64;
6103 
6104 	val64 = readq(&bar0->adapter_status);
6105 	if (!(LINK_IS_UP(val64)))
6106 		*data = 1;
6107 	else
6108 		*data = 0;
6109 
6110 	return *data;
6111 }
6112 
6113 /**
6114  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6115  * @sp: private member of the device structure, which is a pointer to the
6116  * s2io_nic structure.
6117  * @data: variable that returns the result of each of the test
6118  * conducted by the driver.
6119  * Description:
 *  This is one of the offline tests that verifies the read and write
 *  access to the RldRam chip on the NIC.
6122  * Return value:
6123  *  0 on success.
6124  */
6125 
6126 static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6127 {
6128 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6129 	u64 val64;
6130 	int cnt, iteration = 0, test_fail = 0;
6131 
6132 	val64 = readq(&bar0->adapter_control);
6133 	val64 &= ~ADAPTER_ECC_EN;
6134 	writeq(val64, &bar0->adapter_control);
6135 
6136 	val64 = readq(&bar0->mc_rldram_test_ctrl);
6137 	val64 |= MC_RLDRAM_TEST_MODE;
6138 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6139 
6140 	val64 = readq(&bar0->mc_rldram_mrs);
6141 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6142 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6143 
6144 	val64 |= MC_RLDRAM_MRS_ENABLE;
6145 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6146 
6147 	while (iteration < 2) {
6148 		val64 = 0x55555555aaaa0000ULL;
6149 		if (iteration == 1)
6150 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6151 		writeq(val64, &bar0->mc_rldram_test_d0);
6152 
6153 		val64 = 0xaaaa5a5555550000ULL;
6154 		if (iteration == 1)
6155 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6156 		writeq(val64, &bar0->mc_rldram_test_d1);
6157 
6158 		val64 = 0x55aaaaaaaa5a0000ULL;
6159 		if (iteration == 1)
6160 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6161 		writeq(val64, &bar0->mc_rldram_test_d2);
6162 
6163 		val64 = (u64) (0x0000003ffffe0100ULL);
6164 		writeq(val64, &bar0->mc_rldram_test_add);
6165 
6166 		val64 = MC_RLDRAM_TEST_MODE |
6167 			MC_RLDRAM_TEST_WRITE |
6168 			MC_RLDRAM_TEST_GO;
6169 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6170 
6171 		for (cnt = 0; cnt < 5; cnt++) {
6172 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6173 			if (val64 & MC_RLDRAM_TEST_DONE)
6174 				break;
6175 			msleep(200);
6176 		}
6177 
6178 		if (cnt == 5)
6179 			break;
6180 
6181 		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6182 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6183 
6184 		for (cnt = 0; cnt < 5; cnt++) {
6185 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6186 			if (val64 & MC_RLDRAM_TEST_DONE)
6187 				break;
6188 			msleep(500);
6189 		}
6190 
6191 		if (cnt == 5)
6192 			break;
6193 
6194 		val64 = readq(&bar0->mc_rldram_test_ctrl);
6195 		if (!(val64 & MC_RLDRAM_TEST_PASS))
6196 			test_fail = 1;
6197 
6198 		iteration++;
6199 	}
6200 
6201 	*data = test_fail;
6202 
6203 	/* Bring the adapter out of test mode */
6204 	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6205 
6206 	return test_fail;
6207 }
6208 
6209 /**
 *  s2io_ethtool_test - conducts self tests to determine the health of card.
 *  @dev : pointer to the netdev on which the operation was invoked.
 *  @ethtest : pointer to an ethtool command specific structure that will be
 *  returned to the user.
 *  @data : variable that returns the result of each of the tests
 * conducted by the driver.
 * Description:
 *  This function conducts five tests (four offline and one online) to
 *  determine the health of the card.
6220  * Return value:
6221  *  void
6222  */
6223 
6224 static void s2io_ethtool_test(struct net_device *dev,
6225 			      struct ethtool_test *ethtest,
6226 			      uint64_t *data)
6227 {
6228 	struct s2io_nic *sp = netdev_priv(dev);
6229 	int orig_state = netif_running(sp->dev);
6230 
6231 	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6232 		/* Offline Tests. */
6233 		if (orig_state)
6234 			s2io_close(sp->dev);
6235 
6236 		if (s2io_register_test(sp, &data[0]))
6237 			ethtest->flags |= ETH_TEST_FL_FAILED;
6238 
6239 		s2io_reset(sp);
6240 
6241 		if (s2io_rldram_test(sp, &data[3]))
6242 			ethtest->flags |= ETH_TEST_FL_FAILED;
6243 
6244 		s2io_reset(sp);
6245 
6246 		if (s2io_eeprom_test(sp, &data[1]))
6247 			ethtest->flags |= ETH_TEST_FL_FAILED;
6248 
6249 		if (s2io_bist_test(sp, &data[4]))
6250 			ethtest->flags |= ETH_TEST_FL_FAILED;
6251 
6252 		if (orig_state)
6253 			s2io_open(sp->dev);
6254 
6255 		data[2] = 0;
6256 	} else {
6257 		/* Online Tests. */
6258 		if (!orig_state) {
6259 			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6260 				  dev->name);
6261 			data[0] = -1;
6262 			data[1] = -1;
6263 			data[2] = -1;
6264 			data[3] = -1;
6265 			data[4] = -1;
6266 		}
6267 
6268 		if (s2io_link_test(sp, &data[2]))
6269 			ethtest->flags |= ETH_TEST_FL_FAILED;
6270 
6271 		data[0] = 0;
6272 		data[1] = 0;
6273 		data[3] = 0;
6274 		data[4] = 0;
6275 	}
6276 }
6277 
6278 static void s2io_get_ethtool_stats(struct net_device *dev,
6279 				   struct ethtool_stats *estats,
6280 				   u64 *tmp_stats)
6281 {
6282 	int i = 0, k;
6283 	struct s2io_nic *sp = netdev_priv(dev);
6284 	struct stat_block *stats = sp->mac_control.stats_info;
6285 	struct swStat *swstats = &stats->sw_stat;
6286 	struct xpakStat *xstats = &stats->xpak_stat;
6287 
6288 	s2io_updt_stats(sp);
6289 	tmp_stats[i++] =
6290 		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
6291 		le32_to_cpu(stats->tmac_frms);
6292 	tmp_stats[i++] =
6293 		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6294 		le32_to_cpu(stats->tmac_data_octets);
6295 	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6296 	tmp_stats[i++] =
6297 		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6298 		le32_to_cpu(stats->tmac_mcst_frms);
6299 	tmp_stats[i++] =
6300 		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6301 		le32_to_cpu(stats->tmac_bcst_frms);
6302 	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6303 	tmp_stats[i++] =
6304 		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6305 		le32_to_cpu(stats->tmac_ttl_octets);
6306 	tmp_stats[i++] =
6307 		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6308 		le32_to_cpu(stats->tmac_ucst_frms);
6309 	tmp_stats[i++] =
6310 		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6311 		le32_to_cpu(stats->tmac_nucst_frms);
6312 	tmp_stats[i++] =
6313 		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6314 		le32_to_cpu(stats->tmac_any_err_frms);
6315 	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6316 	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6317 	tmp_stats[i++] =
6318 		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6319 		le32_to_cpu(stats->tmac_vld_ip);
6320 	tmp_stats[i++] =
6321 		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6322 		le32_to_cpu(stats->tmac_drop_ip);
6323 	tmp_stats[i++] =
6324 		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6325 		le32_to_cpu(stats->tmac_icmp);
6326 	tmp_stats[i++] =
6327 		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6328 		le32_to_cpu(stats->tmac_rst_tcp);
6329 	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6330 	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6331 		le32_to_cpu(stats->tmac_udp);
6332 	tmp_stats[i++] =
6333 		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6334 		le32_to_cpu(stats->rmac_vld_frms);
6335 	tmp_stats[i++] =
6336 		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6337 		le32_to_cpu(stats->rmac_data_octets);
6338 	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6339 	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6340 	tmp_stats[i++] =
6341 		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6342 		le32_to_cpu(stats->rmac_vld_mcst_frms);
6343 	tmp_stats[i++] =
6344 		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6345 		le32_to_cpu(stats->rmac_vld_bcst_frms);
6346 	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6347 	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6348 	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6349 	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6350 	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6351 	tmp_stats[i++] =
6352 		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6353 		le32_to_cpu(stats->rmac_ttl_octets);
6354 	tmp_stats[i++] =
6355 		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6356 		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
6357 	tmp_stats[i++] =
6358 		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6359 		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6360 	tmp_stats[i++] =
6361 		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6362 		le32_to_cpu(stats->rmac_discarded_frms);
6363 	tmp_stats[i++] =
6364 		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6365 		<< 32 | le32_to_cpu(stats->rmac_drop_events);
6366 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6367 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6368 	tmp_stats[i++] =
6369 		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6370 		le32_to_cpu(stats->rmac_usized_frms);
6371 	tmp_stats[i++] =
6372 		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6373 		le32_to_cpu(stats->rmac_osized_frms);
6374 	tmp_stats[i++] =
6375 		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6376 		le32_to_cpu(stats->rmac_frag_frms);
6377 	tmp_stats[i++] =
6378 		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6379 		le32_to_cpu(stats->rmac_jabber_frms);
6380 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6381 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6382 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6383 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6384 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6385 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6386 	tmp_stats[i++] =
6387 		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6388 		le32_to_cpu(stats->rmac_ip);
6389 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6390 	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6391 	tmp_stats[i++] =
6392 		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6393 		le32_to_cpu(stats->rmac_drop_ip);
6394 	tmp_stats[i++] =
6395 		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6396 		le32_to_cpu(stats->rmac_icmp);
6397 	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6398 	tmp_stats[i++] =
6399 		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6400 		le32_to_cpu(stats->rmac_udp);
6401 	tmp_stats[i++] =
6402 		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6403 		le32_to_cpu(stats->rmac_err_drp_udp);
6404 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6405 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6406 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6407 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6408 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6409 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6410 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6411 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6412 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6413 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6414 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6415 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6416 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6417 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6418 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6419 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6420 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6421 	tmp_stats[i++] =
6422 		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6423 		le32_to_cpu(stats->rmac_pause_cnt);
6424 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6425 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6426 	tmp_stats[i++] =
6427 		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6428 		le32_to_cpu(stats->rmac_accepted_ip);
6429 	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6430 	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6431 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6432 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6433 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6434 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6435 	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6436 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6437 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6438 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6439 	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6440 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6441 	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6442 	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6443 	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6444 	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6445 	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6446 	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6447 	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6448 
6449 	/* Enhanced statistics exist only for Hercules */
6450 	if (sp->device_type == XFRAME_II_DEVICE) {
6451 		tmp_stats[i++] =
6452 			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6453 		tmp_stats[i++] =
6454 			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6455 		tmp_stats[i++] =
6456 			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6457 		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6458 		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6459 		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6460 		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6461 		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6462 		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6463 		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6464 		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6465 		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6466 		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6467 		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6468 		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6469 		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6470 	}
6471 
6472 	tmp_stats[i++] = 0;
6473 	tmp_stats[i++] = swstats->single_ecc_errs;
6474 	tmp_stats[i++] = swstats->double_ecc_errs;
6475 	tmp_stats[i++] = swstats->parity_err_cnt;
6476 	tmp_stats[i++] = swstats->serious_err_cnt;
6477 	tmp_stats[i++] = swstats->soft_reset_cnt;
6478 	tmp_stats[i++] = swstats->fifo_full_cnt;
6479 	for (k = 0; k < MAX_RX_RINGS; k++)
6480 		tmp_stats[i++] = swstats->ring_full_cnt[k];
6481 	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6482 	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6483 	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6484 	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6485 	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6486 	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6487 	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6488 	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6489 	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6490 	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6491 	tmp_stats[i++] = xstats->warn_laser_output_power_high;
6492 	tmp_stats[i++] = xstats->warn_laser_output_power_low;
6493 	tmp_stats[i++] = swstats->clubbed_frms_cnt;
6494 	tmp_stats[i++] = swstats->sending_both;
6495 	tmp_stats[i++] = swstats->outof_sequence_pkts;
6496 	tmp_stats[i++] = swstats->flush_max_pkts;
6497 	if (swstats->num_aggregations) {
6498 		u64 tmp = swstats->sum_avg_pkts_aggregated;
6499 		int count = 0;
6500 		/*
6501 		 * Since 64-bit divide does not work on all platforms,
6502 		 * do repeated subtraction.
6503 		 */
6504 		while (tmp >= swstats->num_aggregations) {
6505 			tmp -= swstats->num_aggregations;
6506 			count++;
6507 		}
6508 		tmp_stats[i++] = count;
6509 	} else
6510 		tmp_stats[i++] = 0;
6511 	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6512 	tmp_stats[i++] = swstats->pci_map_fail_cnt;
6513 	tmp_stats[i++] = swstats->watchdog_timer_cnt;
6514 	tmp_stats[i++] = swstats->mem_allocated;
6515 	tmp_stats[i++] = swstats->mem_freed;
6516 	tmp_stats[i++] = swstats->link_up_cnt;
6517 	tmp_stats[i++] = swstats->link_down_cnt;
6518 	tmp_stats[i++] = swstats->link_up_time;
6519 	tmp_stats[i++] = swstats->link_down_time;
6520 
6521 	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6522 	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6523 	tmp_stats[i++] = swstats->tx_parity_err_cnt;
6524 	tmp_stats[i++] = swstats->tx_link_loss_cnt;
6525 	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6526 
6527 	tmp_stats[i++] = swstats->rx_parity_err_cnt;
6528 	tmp_stats[i++] = swstats->rx_abort_cnt;
6529 	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6530 	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6531 	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6532 	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6533 	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6534 	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6535 	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6536 	tmp_stats[i++] = swstats->tda_err_cnt;
6537 	tmp_stats[i++] = swstats->pfc_err_cnt;
6538 	tmp_stats[i++] = swstats->pcc_err_cnt;
6539 	tmp_stats[i++] = swstats->tti_err_cnt;
6540 	tmp_stats[i++] = swstats->tpa_err_cnt;
6541 	tmp_stats[i++] = swstats->sm_err_cnt;
6542 	tmp_stats[i++] = swstats->lso_err_cnt;
6543 	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6544 	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6545 	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6546 	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6547 	tmp_stats[i++] = swstats->rc_err_cnt;
6548 	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6549 	tmp_stats[i++] = swstats->rpa_err_cnt;
6550 	tmp_stats[i++] = swstats->rda_err_cnt;
6551 	tmp_stats[i++] = swstats->rti_err_cnt;
6552 	tmp_stats[i++] = swstats->mc_err_cnt;
6553 }
6554 
6555 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6556 {
6557 	return XENA_REG_SPACE;
6558 }
6559 
6560 
6561 static int s2io_get_eeprom_len(struct net_device *dev)
6562 {
6563 	return XENA_EEPROM_SPACE;
6564 }
6565 
6566 static int s2io_get_sset_count(struct net_device *dev, int sset)
6567 {
6568 	struct s2io_nic *sp = netdev_priv(dev);
6569 
6570 	switch (sset) {
6571 	case ETH_SS_TEST:
6572 		return S2IO_TEST_LEN;
6573 	case ETH_SS_STATS:
6574 		switch (sp->device_type) {
6575 		case XFRAME_I_DEVICE:
6576 			return XFRAME_I_STAT_LEN;
6577 		case XFRAME_II_DEVICE:
6578 			return XFRAME_II_STAT_LEN;
6579 		default:
6580 			return 0;
6581 		}
6582 	default:
6583 		return -EOPNOTSUPP;
6584 	}
6585 }
6586 
6587 static void s2io_ethtool_get_strings(struct net_device *dev,
6588 				     u32 stringset, u8 *data)
6589 {
6590 	int stat_size = 0;
6591 	struct s2io_nic *sp = netdev_priv(dev);
6592 
6593 	switch (stringset) {
6594 	case ETH_SS_TEST:
6595 		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6596 		break;
6597 	case ETH_SS_STATS:
6598 		stat_size = sizeof(ethtool_xena_stats_keys);
6599 		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6600 		if (sp->device_type == XFRAME_II_DEVICE) {
6601 			memcpy(data + stat_size,
6602 			       &ethtool_enhanced_stats_keys,
6603 			       sizeof(ethtool_enhanced_stats_keys));
6604 			stat_size += sizeof(ethtool_enhanced_stats_keys);
6605 		}
6606 
6607 		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6608 		       sizeof(ethtool_driver_stats_keys));
6609 	}
6610 }
6611 
6612 static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6613 {
6614 	struct s2io_nic *sp = netdev_priv(dev);
6615 	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6616 
6617 	if (changed && netif_running(dev)) {
6618 		int rc;
6619 
6620 		s2io_stop_all_tx_queue(sp);
6621 		s2io_card_down(sp);
6622 		dev->features = features;
6623 		rc = s2io_card_up(sp);
6624 		if (rc)
6625 			s2io_reset(sp);
6626 		else
6627 			s2io_start_all_tx_queue(sp);
6628 
6629 		return rc ? rc : 1;
6630 	}
6631 
6632 	return 0;
6633 }
6634 
6635 static const struct ethtool_ops netdev_ethtool_ops = {
6636 	.get_settings = s2io_ethtool_gset,
6637 	.set_settings = s2io_ethtool_sset,
6638 	.get_drvinfo = s2io_ethtool_gdrvinfo,
6639 	.get_regs_len = s2io_ethtool_get_regs_len,
6640 	.get_regs = s2io_ethtool_gregs,
6641 	.get_link = ethtool_op_get_link,
6642 	.get_eeprom_len = s2io_get_eeprom_len,
6643 	.get_eeprom = s2io_ethtool_geeprom,
6644 	.set_eeprom = s2io_ethtool_seeprom,
6645 	.get_ringparam = s2io_ethtool_gringparam,
6646 	.get_pauseparam = s2io_ethtool_getpause_data,
6647 	.set_pauseparam = s2io_ethtool_setpause_data,
6648 	.self_test = s2io_ethtool_test,
6649 	.get_strings = s2io_ethtool_get_strings,
6650 	.set_phys_id = s2io_ethtool_set_led,
6651 	.get_ethtool_stats = s2io_get_ethtool_stats,
6652 	.get_sset_count = s2io_get_sset_count,
6653 };
6654 
6655 /**
6656  *  s2io_ioctl - Entry point for the Ioctl
6657  *  @dev :  Device pointer.
6658  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6659  *  a proprietary structure used to pass information to the driver.
6660  *  @cmd :  This is used to distinguish between the different commands that
6661  *  can be passed to the IOCTL functions.
6662  *  Description:
6663  *  Currently there are no special functionality supported in IOCTL, hence
6664  *  function always return EOPNOTSUPPORTED
6665  */
6666 
6667 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6668 {
6669 	return -EOPNOTSUPP;
6670 }
6671 
6672 /**
6673  *  s2io_change_mtu - entry point to change MTU size for the device.
6674  *   @dev : device pointer.
6675  *   @new_mtu : the new MTU size for the device.
6676  *   Description: A driver entry point to change MTU size for the device.
6677  *   Before changing the MTU the device must be stopped.
6678  *  Return value:
6679  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6680  *   file on failure.
6681  */
6682 
6683 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6684 {
6685 	struct s2io_nic *sp = netdev_priv(dev);
6686 	int ret = 0;
6687 
6688 	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6689 		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
6690 		return -EPERM;
6691 	}
6692 
6693 	dev->mtu = new_mtu;
6694 	if (netif_running(dev)) {
6695 		s2io_stop_all_tx_queue(sp);
6696 		s2io_card_down(sp);
6697 		ret = s2io_card_up(sp);
6698 		if (ret) {
6699 			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6700 				  __func__);
6701 			return ret;
6702 		}
6703 		s2io_wake_all_tx_queue(sp);
6704 	} else { /* Device is down */
6705 		struct XENA_dev_config __iomem *bar0 = sp->bar0;
6706 		u64 val64 = new_mtu;
6707 
6708 		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6709 	}
6710 
6711 	return ret;
6712 }
6713 
6714 /**
6715  * s2io_set_link - Set the LInk status
6716  * @data: long pointer to device private structue
6717  * Description: Sets the link status for the adapter
6718  */
6719 
6720 static void s2io_set_link(struct work_struct *work)
6721 {
6722 	struct s2io_nic *nic = container_of(work, struct s2io_nic,
6723 					    set_link_task);
6724 	struct net_device *dev = nic->dev;
6725 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
6726 	register u64 val64;
6727 	u16 subid;
6728 
6729 	rtnl_lock();
6730 
6731 	if (!netif_running(dev))
6732 		goto out_unlock;
6733 
6734 	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6735 		/* The card is being reset, no point doing anything */
6736 		goto out_unlock;
6737 	}
6738 
6739 	subid = nic->pdev->subsystem_device;
6740 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6741 		/*
6742 		 * Allow a small delay for the NICs self initiated
6743 		 * cleanup to complete.
6744 		 */
6745 		msleep(100);
6746 	}
6747 
6748 	val64 = readq(&bar0->adapter_status);
6749 	if (LINK_IS_UP(val64)) {
6750 		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6751 			if (verify_xena_quiescence(nic)) {
6752 				val64 = readq(&bar0->adapter_control);
6753 				val64 |= ADAPTER_CNTL_EN;
6754 				writeq(val64, &bar0->adapter_control);
6755 				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6756 					    nic->device_type, subid)) {
6757 					val64 = readq(&bar0->gpio_control);
6758 					val64 |= GPIO_CTRL_GPIO_0;
6759 					writeq(val64, &bar0->gpio_control);
6760 					val64 = readq(&bar0->gpio_control);
6761 				} else {
6762 					val64 |= ADAPTER_LED_ON;
6763 					writeq(val64, &bar0->adapter_control);
6764 				}
6765 				nic->device_enabled_once = true;
6766 			} else {
6767 				DBG_PRINT(ERR_DBG,
6768 					  "%s: Error: device is not Quiescent\n",
6769 					  dev->name);
6770 				s2io_stop_all_tx_queue(nic);
6771 			}
6772 		}
6773 		val64 = readq(&bar0->adapter_control);
6774 		val64 |= ADAPTER_LED_ON;
6775 		writeq(val64, &bar0->adapter_control);
6776 		s2io_link(nic, LINK_UP);
6777 	} else {
6778 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6779 						      subid)) {
6780 			val64 = readq(&bar0->gpio_control);
6781 			val64 &= ~GPIO_CTRL_GPIO_0;
6782 			writeq(val64, &bar0->gpio_control);
6783 			val64 = readq(&bar0->gpio_control);
6784 		}
6785 		/* turn off LED */
6786 		val64 = readq(&bar0->adapter_control);
6787 		val64 = val64 & (~ADAPTER_LED_ON);
6788 		writeq(val64, &bar0->adapter_control);
6789 		s2io_link(nic, LINK_DOWN);
6790 	}
6791 	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6792 
6793 out_unlock:
6794 	rtnl_unlock();
6795 }
6796 
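/*
 * set_rxd_buffer_pointer - re-arm one empty RxD during the quiescence loop.
 * A single scratch skb is allocated on first use; its DMA addresses are
 * cached in *temp0/*temp1/*temp2 so that every subsequent empty RxD (one
 * whose Host_Control is 0) can share the same mapping. This is safe only
 * because the frames received into these buffers are never processed.
 */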
6797 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6798 				  struct buffAdd *ba,
6799 				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
6800 				  u64 *temp2, int size)
6801 {
6802 	struct net_device *dev = sp->dev;
6803 	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6804 
6805 	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6806 		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6807 		/* allocate skb */
6808 		if (*skb) {
6809 			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As the Rx frames are not going to be processed,
			 * reuse the same mapped address for the RxD
			 * buffer pointer.
			 */
6815 			rxdp1->Buffer0_ptr = *temp0;
6816 		} else {
6817 			*skb = netdev_alloc_skb(dev, size);
6818 			if (!(*skb)) {
6819 				DBG_PRINT(INFO_DBG,
6820 					  "%s: Out of memory to allocate %s\n",
6821 					  dev->name, "1 buf mode SKBs");
6822 				stats->mem_alloc_fail_cnt++;
				return -ENOMEM;
6824 			}
6825 			stats->mem_allocated += (*skb)->truesize;
			/* Store the mapped address in a temp variable
			 * so it can be reused for the next RxD whose
			 * Host_Control is NULL.
			 */
6830 			rxdp1->Buffer0_ptr = *temp0 =
6831 				pci_map_single(sp->pdev, (*skb)->data,
6832 					       size - NET_IP_ALIGN,
6833 					       PCI_DMA_FROMDEVICE);
6834 			if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6835 				goto memalloc_failed;
6836 			rxdp->Host_Control = (unsigned long) (*skb);
6837 		}
6838 	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6839 		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6840 		/* Two buffer Mode */
6841 		if (*skb) {
6842 			rxdp3->Buffer2_ptr = *temp2;
6843 			rxdp3->Buffer0_ptr = *temp0;
6844 			rxdp3->Buffer1_ptr = *temp1;
6845 		} else {
6846 			*skb = netdev_alloc_skb(dev, size);
6847 			if (!(*skb)) {
6848 				DBG_PRINT(INFO_DBG,
6849 					  "%s: Out of memory to allocate %s\n",
6850 					  dev->name,
6851 					  "2 buf mode SKBs");
6852 				stats->mem_alloc_fail_cnt++;
6853 				return -ENOMEM;
6854 			}
6855 			stats->mem_allocated += (*skb)->truesize;
6856 			rxdp3->Buffer2_ptr = *temp2 =
6857 				pci_map_single(sp->pdev, (*skb)->data,
6858 					       dev->mtu + 4,
6859 					       PCI_DMA_FROMDEVICE);
6860 			if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6861 				goto memalloc_failed;
6862 			rxdp3->Buffer0_ptr = *temp0 =
6863 				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6864 					       PCI_DMA_FROMDEVICE);
6865 			if (pci_dma_mapping_error(sp->pdev,
6866 						  rxdp3->Buffer0_ptr)) {
6867 				pci_unmap_single(sp->pdev,
6868 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6869 						 dev->mtu + 4,
6870 						 PCI_DMA_FROMDEVICE);
6871 				goto memalloc_failed;
6872 			}
6873 			rxdp->Host_Control = (unsigned long) (*skb);
6874 
			/* Buffer-1 will be a dummy buffer, not used */
6876 			rxdp3->Buffer1_ptr = *temp1 =
6877 				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6878 					       PCI_DMA_FROMDEVICE);
6879 			if (pci_dma_mapping_error(sp->pdev,
6880 						  rxdp3->Buffer1_ptr)) {
6881 				pci_unmap_single(sp->pdev,
6882 						 (dma_addr_t)rxdp3->Buffer0_ptr,
6883 						 BUF0_LEN, PCI_DMA_FROMDEVICE);
6884 				pci_unmap_single(sp->pdev,
6885 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6886 						 dev->mtu + 4,
6887 						 PCI_DMA_FROMDEVICE);
6888 				goto memalloc_failed;
6889 			}
6890 		}
6891 	}
6892 	return 0;
6893 
6894 memalloc_failed:
6895 	stats->pci_map_fail_cnt++;
6896 	stats->mem_freed += (*skb)->truesize;
6897 	dev_kfree_skb(*skb);
6898 	return -ENOMEM;
6899 }
6900 
6901 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6902 				int size)
6903 {
6904 	struct net_device *dev = sp->dev;
6905 	if (sp->rxd_mode == RXD_MODE_1) {
6906 		rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6907 	} else if (sp->rxd_mode == RXD_MODE_3B) {
6908 		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6909 		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6910 		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6911 	}
6912 }
6913 
6914 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6915 {
6916 	int i, j, k, blk_cnt = 0, size;
6917 	struct config_param *config = &sp->config;
6918 	struct mac_info *mac_control = &sp->mac_control;
6919 	struct net_device *dev = sp->dev;
6920 	struct RxD_t *rxdp = NULL;
6921 	struct sk_buff *skb = NULL;
6922 	struct buffAdd *ba = NULL;
6923 	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6924 
6925 	/* Calculate the size based on ring mode */
6926 	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6927 		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6928 	if (sp->rxd_mode == RXD_MODE_1)
6929 		size += NET_IP_ALIGN;
6930 	else if (sp->rxd_mode == RXD_MODE_3B)
6931 		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6932 
6933 	for (i = 0; i < config->rx_ring_num; i++) {
6934 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6935 		struct ring_info *ring = &mac_control->rings[i];
6936 
6937 		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6938 
6939 		for (j = 0; j < blk_cnt; j++) {
6940 			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6941 				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6942 				if (sp->rxd_mode == RXD_MODE_3B)
6943 					ba = &ring->ba[j][k];
6944 				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6945 							   &temp0_64,
6946 							   &temp1_64,
6947 							   &temp2_64,
6948 							   size) == -ENOMEM) {
6949 					return 0;
6950 				}
6951 
6952 				set_rxd_buffer_size(sp, rxdp, size);
6953 				dma_wmb();
6954 				/* flip the Ownership bit to Hardware */
6955 				rxdp->Control_1 |= RXD_OWN_XENA;
6956 			}
6957 		}
6958 	}
6959 	return 0;
6960 
6961 }
6962 
6963 static int s2io_add_isr(struct s2io_nic *sp)
6964 {
6965 	int ret = 0;
6966 	struct net_device *dev = sp->dev;
6967 	int err = 0;
6968 
6969 	if (sp->config.intr_type == MSI_X)
6970 		ret = s2io_enable_msi_x(sp);
6971 	if (ret) {
6972 		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6973 		sp->config.intr_type = INTA;
6974 	}
6975 
6976 	/*
6977 	 * Store the values of the MSIX table in
6978 	 * the struct s2io_nic structure
6979 	 */
6980 	store_xmsi_data(sp);
6981 
6982 	/* After proper initialization of H/W, register ISR */
6983 	if (sp->config.intr_type == MSI_X) {
6984 		int i, msix_rx_cnt = 0;
6985 
6986 		for (i = 0; i < sp->num_entries; i++) {
6987 			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6988 				if (sp->s2io_entries[i].type ==
6989 				    MSIX_RING_TYPE) {
6990 					snprintf(sp->desc[i],
6991 						sizeof(sp->desc[i]),
6992 						"%s:MSI-X-%d-RX",
6993 						dev->name, i);
6994 					err = request_irq(sp->entries[i].vector,
6995 							  s2io_msix_ring_handle,
6996 							  0,
6997 							  sp->desc[i],
6998 							  sp->s2io_entries[i].arg);
6999 				} else if (sp->s2io_entries[i].type ==
7000 					   MSIX_ALARM_TYPE) {
7001 					snprintf(sp->desc[i],
7002 						sizeof(sp->desc[i]),
7003 						"%s:MSI-X-%d-TX",
7004 						dev->name, i);
7005 					err = request_irq(sp->entries[i].vector,
7006 							  s2io_msix_fifo_handle,
7007 							  0,
7008 							  sp->desc[i],
7009 							  sp->s2io_entries[i].arg);
				}
				/* if either the addr or data is zero, print it */
7013 				if (!(sp->msix_info[i].addr &&
7014 				      sp->msix_info[i].data)) {
7015 					DBG_PRINT(ERR_DBG,
7016 						  "%s @Addr:0x%llx Data:0x%llx\n",
7017 						  sp->desc[i],
7018 						  (unsigned long long)
7019 						  sp->msix_info[i].addr,
7020 						  (unsigned long long)
7021 						  ntohl(sp->msix_info[i].data));
				} else {
					msix_rx_cnt++;
				}
7024 				if (err) {
7025 					remove_msix_isr(sp);
7026 
7027 					DBG_PRINT(ERR_DBG,
7028 						  "%s:MSI-X-%d registration "
7029 						  "failed\n", dev->name, i);
7030 
7031 					DBG_PRINT(ERR_DBG,
7032 						  "%s: Defaulting to INTA\n",
7033 						  dev->name);
7034 					sp->config.intr_type = INTA;
7035 					break;
7036 				}
7037 				sp->s2io_entries[i].in_use =
7038 					MSIX_REGISTERED_SUCCESS;
7039 			}
7040 		}
7041 		if (!err) {
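			/*
			 * msix_rx_cnt counted every vector with a valid
			 * address/data pair, presumably including the alarm
			 * vector; the pre-decrement below excludes it from
			 * the RX count.
			 */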
7042 			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
7043 			DBG_PRINT(INFO_DBG,
7044 				  "MSI-X-TX entries enabled through alarm vector\n");
7045 		}
7046 	}
7047 	if (sp->config.intr_type == INTA) {
7048 		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
7049 				  sp->name, dev);
7050 		if (err) {
7051 			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7052 				  dev->name);
7053 			return -1;
7054 		}
7055 	}
7056 	return 0;
7057 }
7058 
7059 static void s2io_rem_isr(struct s2io_nic *sp)
7060 {
7061 	if (sp->config.intr_type == MSI_X)
7062 		remove_msix_isr(sp);
7063 	else
7064 		remove_inta_isr(sp);
7065 }
7066 
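/*
 * do_s2io_card_down - common teardown for close, reset and error recovery.
 * @do_io: when zero (the PCI error-recovery path), all register accesses
 * are skipped and only the software state and buffers are torn down.
 */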
7067 static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7068 {
7069 	int cnt = 0;
7070 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
7071 	register u64 val64 = 0;
7072 	struct config_param *config;
7073 	config = &sp->config;
7074 
7075 	if (!is_s2io_card_up(sp))
7076 		return;
7077 
7078 	del_timer_sync(&sp->alarm_timer);
7079 	/* If s2io_set_link task is executing, wait till it completes. */
7080 	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7081 		msleep(50);
7082 	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7083 
7084 	/* Disable napi */
	if (sp->config.napi) {
		int off = 0;

		if (config->intr_type == MSI_X) {
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
		} else {
			napi_disable(&sp->napi);
		}
	}
7094 
7095 	/* disable Tx and Rx traffic on the NIC */
7096 	if (do_io)
7097 		stop_nic(sp);
7098 
7099 	s2io_rem_isr(sp);
7100 
7101 	/* stop the tx queue, indicate link down */
7102 	s2io_link(sp, LINK_DOWN);
7103 
7104 	/* Check if the device is Quiescent and then Reset the NIC */
7105 	while (do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffers to avoid a ring bump. Since there is
		 * no intention of processing the Rx frames at this point,
		 * we just set the ownership bit of the RxDs in each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode.
		 */
7113 		rxd_owner_bit_reset(sp);
7114 
7115 		val64 = readq(&bar0->adapter_status);
7116 		if (verify_xena_quiescence(sp)) {
7117 			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7118 				break;
7119 		}
7120 
7121 		msleep(50);
7122 		cnt++;
7123 		if (cnt == 10) {
7124 			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7125 				  "adapter status reads 0x%llx\n",
7126 				  (unsigned long long)val64);
7127 			break;
7128 		}
7129 	}
7130 	if (do_io)
7131 		s2io_reset(sp);
7132 
7133 	/* Free all Tx buffers */
7134 	free_tx_buffers(sp);
7135 
7136 	/* Free all Rx buffers */
7137 	free_rx_buffers(sp);
7138 
7139 	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7140 }
7141 
7142 static void s2io_card_down(struct s2io_nic *sp)
7143 {
7144 	do_s2io_card_down(sp, 1);
7145 }
7146 
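/*
 * s2io_card_up - bring the adapter to an operational state.
 * The bring-up sequence is roughly the reverse of do_s2io_card_down():
 * initialize the H/W registers, replenish every Rx ring, enable NAPI,
 * restore the receive mode, start the NIC, register the ISR(s), arm the
 * alarm timer and finally enable the selected interrupt sources.
 */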
7147 static int s2io_card_up(struct s2io_nic *sp)
7148 {
7149 	int i, ret = 0;
7150 	struct config_param *config;
7151 	struct mac_info *mac_control;
7152 	struct net_device *dev = sp->dev;
7153 	u16 interruptible;
7154 
7155 	/* Initialize the H/W I/O registers */
7156 	ret = init_nic(sp);
7157 	if (ret != 0) {
7158 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7159 			  dev->name);
7160 		if (ret != -EIO)
7161 			s2io_reset(sp);
7162 		return ret;
7163 	}
7164 
	/*
	 * Initialize the Rx buffers for every configured Rx ring.
	 */
7169 	config = &sp->config;
7170 	mac_control = &sp->mac_control;
7171 
7172 	for (i = 0; i < config->rx_ring_num; i++) {
7173 		struct ring_info *ring = &mac_control->rings[i];
7174 
7175 		ring->mtu = dev->mtu;
7176 		ring->lro = !!(dev->features & NETIF_F_LRO);
7177 		ret = fill_rx_buffers(sp, ring, 1);
7178 		if (ret) {
7179 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7180 				  dev->name);
7181 			s2io_reset(sp);
7182 			free_rx_buffers(sp);
7183 			return -ENOMEM;
7184 		}
7185 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7186 			  ring->rx_bufs_left);
7187 	}
7188 
7189 	/* Initialise napi */
7190 	if (config->napi) {
7191 		if (config->intr_type ==  MSI_X) {
7192 			for (i = 0; i < sp->config.rx_ring_num; i++)
7193 				napi_enable(&sp->mac_control.rings[i].napi);
7194 		} else {
7195 			napi_enable(&sp->napi);
7196 		}
7197 	}
7198 
7199 	/* Maintain the state prior to the open */
7200 	if (sp->promisc_flg)
7201 		sp->promisc_flg = 0;
7202 	if (sp->m_cast_flg) {
7203 		sp->m_cast_flg = 0;
7204 		sp->all_multi_pos = 0;
7205 	}
7206 
7207 	/* Setting its receive mode */
7208 	s2io_set_multicast(dev);
7209 
7210 	if (dev->features & NETIF_F_LRO) {
7211 		/* Initialize max aggregatable pkts per session based on MTU */
7212 		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7213 		/* Check if we can use (if specified) user provided value */
7214 		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7215 			sp->lro_max_aggr_per_sess = lro_max_pkts;
7216 	}
7217 
7218 	/* Enable Rx Traffic and interrupts on the NIC */
7219 	if (start_nic(sp)) {
7220 		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7221 		s2io_reset(sp);
7222 		free_rx_buffers(sp);
7223 		return -ENODEV;
7224 	}
7225 
7226 	/* Add interrupt service routine */
7227 	if (s2io_add_isr(sp) != 0) {
7228 		if (sp->config.intr_type == MSI_X)
7229 			s2io_rem_isr(sp);
7230 		s2io_reset(sp);
7231 		free_rx_buffers(sp);
7232 		return -ENODEV;
7233 	}
7234 
7235 	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7236 
7237 	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7238 
7239 	/*  Enable select interrupts */
7240 	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7241 	if (sp->config.intr_type != INTA) {
7242 		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7243 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7244 	} else {
7245 		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7246 		interruptible |= TX_PIC_INTR;
7247 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7248 	}
7249 
7250 	return 0;
7251 }
7252 
7253 /**
7254  * s2io_restart_nic - Resets the NIC.
 * @work : work_struct embedded in the device private structure
7256  * Description:
7257  * This function is scheduled to be run by the s2io_tx_watchdog
7258  * function after 0.5 secs to reset the NIC. The idea is to reduce
7259  * the run time of the watch dog routine which is run holding a
7260  * spin lock.
7261  */
7262 
7263 static void s2io_restart_nic(struct work_struct *work)
7264 {
7265 	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7266 	struct net_device *dev = sp->dev;
7267 
7268 	rtnl_lock();
7269 
7270 	if (!netif_running(dev))
7271 		goto out_unlock;
7272 
7273 	s2io_card_down(sp);
7274 	if (s2io_card_up(sp)) {
7275 		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7276 	}
7277 	s2io_wake_all_tx_queue(sp);
7278 	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7279 out_unlock:
7280 	rtnl_unlock();
7281 }
7282 
7283 /**
7284  *  s2io_tx_watchdog - Watchdog for transmit side.
7285  *  @dev : Pointer to net device structure
7286  *  Description:
7287  *  This function is triggered if the Tx Queue is stopped
7288  *  for a pre-defined amount of time when the Interface is still up.
7289  *  If the Interface is jammed in such a situation, the hardware is
7290  *  reset (by s2io_close) and restarted again (by s2io_open) to
7291  *  overcome any problem that might have been caused in the hardware.
7292  *  Return value:
7293  *  void
7294  */
7295 
7296 static void s2io_tx_watchdog(struct net_device *dev)
7297 {
7298 	struct s2io_nic *sp = netdev_priv(dev);
7299 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7300 
7301 	if (netif_carrier_ok(dev)) {
7302 		swstats->watchdog_timer_cnt++;
7303 		schedule_work(&sp->rst_timer_task);
7304 		swstats->soft_reset_cnt++;
7305 	}
7306 }
7307 
7308 /**
7309  *   rx_osm_handler - To perform some OS related operations on SKB.
 *   @ring_data : per-ring private data, a pointer to the ring_info
 *   structure of the ring from which this RxD was extracted.
 *   @rxdp : the receive descriptor carrying the SKB.
 *   Description:
 *   This function is called by the Rx interrupt service routine to perform
 *   some OS related operations on the SKB before passing it to the upper
 *   layers. It mainly checks if the checksum is OK and, if so, marks the
 *   SKB as checksum-verified, increments the Rx packet count and passes
 *   the SKB to the upper layer. If the checksum is wrong, it increments
 *   the Rx packet error count, frees the SKB and returns an error.
 *   Return value:
 *   SUCCESS on success and 0 when the frame is dropped due to an error.
7324  */
7325 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7326 {
7327 	struct s2io_nic *sp = ring_data->nic;
7328 	struct net_device *dev = ring_data->dev;
7329 	struct sk_buff *skb = (struct sk_buff *)
7330 		((unsigned long)rxdp->Host_Control);
7331 	int ring_no = ring_data->ring_no;
7332 	u16 l3_csum, l4_csum;
7333 	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7334 	struct lro *uninitialized_var(lro);
7335 	u8 err_mask;
7336 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7337 
7338 	skb->dev = dev;
7339 
7340 	if (err) {
7341 		/* Check for parity error */
7342 		if (err & 0x1)
7343 			swstats->parity_err_cnt++;
7344 
7345 		err_mask = err >> 48;
7346 		switch (err_mask) {
7347 		case 1:
7348 			swstats->rx_parity_err_cnt++;
7349 			break;
7350 
7351 		case 2:
7352 			swstats->rx_abort_cnt++;
7353 			break;
7354 
7355 		case 3:
7356 			swstats->rx_parity_abort_cnt++;
7357 			break;
7358 
7359 		case 4:
7360 			swstats->rx_rda_fail_cnt++;
7361 			break;
7362 
7363 		case 5:
7364 			swstats->rx_unkn_prot_cnt++;
7365 			break;
7366 
7367 		case 6:
7368 			swstats->rx_fcs_err_cnt++;
7369 			break;
7370 
7371 		case 7:
7372 			swstats->rx_buf_size_err_cnt++;
7373 			break;
7374 
7375 		case 8:
7376 			swstats->rx_rxd_corrupt_cnt++;
7377 			break;
7378 
7379 		case 15:
7380 			swstats->rx_unkn_err_cnt++;
7381 			break;
7382 		}
		/*
		 * Drop the packet if the transfer code is bad. The exception
		 * is 0x5, which could be due to an unsupported IPv6 extension
		 * header. In that case, we let the stack handle the packet.
		 * Note that since the checksum will be incorrect, the stack
		 * will validate it.
		 */
7390 		if (err_mask != 0x5) {
7391 			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7392 				  dev->name, err_mask);
7393 			dev->stats.rx_crc_errors++;
7394 			swstats->mem_freed
7395 				+= skb->truesize;
7396 			dev_kfree_skb(skb);
7397 			ring_data->rx_bufs_left -= 1;
7398 			rxdp->Host_Control = 0;
7399 			return 0;
7400 		}
7401 	}
7402 
7403 	rxdp->Host_Control = 0;
7404 	if (sp->rxd_mode == RXD_MODE_1) {
7405 		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7406 
7407 		skb_put(skb, len);
7408 	} else if (sp->rxd_mode == RXD_MODE_3B) {
7409 		int get_block = ring_data->rx_curr_get_info.block_index;
7410 		int get_off = ring_data->rx_curr_get_info.offset;
7411 		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7412 		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7413 		unsigned char *buff = skb_push(skb, buf0_len);
7414 
7415 		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7416 		memcpy(buff, ba->ba_0, buf0_len);
7417 		skb_put(skb, buf2_len);
7418 	}
7419 
7420 	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7421 	    ((!ring_data->lro) ||
7422 	     (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7423 	    (dev->features & NETIF_F_RXCSUM)) {
7424 		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7425 		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7426 		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7427 			/*
7428 			 * NIC verifies if the Checksum of the received
7429 			 * frame is Ok or not and accordingly returns
7430 			 * a flag in the RxD.
7431 			 */
7432 			skb->ip_summed = CHECKSUM_UNNECESSARY;
7433 			if (ring_data->lro) {
7434 				u32 tcp_len = 0;
7435 				u8 *tcp;
7436 				int ret = 0;
7437 
7438 				ret = s2io_club_tcp_session(ring_data,
7439 							    skb->data, &tcp,
7440 							    &tcp_len, &lro,
7441 							    rxdp, sp);
7442 				switch (ret) {
7443 				case 3: /* Begin anew */
7444 					lro->parent = skb;
7445 					goto aggregate;
7446 				case 1: /* Aggregate */
7447 					lro_append_pkt(sp, lro, skb, tcp_len);
7448 					goto aggregate;
7449 				case 4: /* Flush session */
7450 					lro_append_pkt(sp, lro, skb, tcp_len);
7451 					queue_rx_frame(lro->parent,
7452 						       lro->vlan_tag);
7453 					clear_lro_session(lro);
7454 					swstats->flush_max_pkts++;
7455 					goto aggregate;
7456 				case 2: /* Flush both */
7457 					lro->parent->data_len = lro->frags_len;
7458 					swstats->sending_both++;
7459 					queue_rx_frame(lro->parent,
7460 						       lro->vlan_tag);
7461 					clear_lro_session(lro);
7462 					goto send_up;
7463 				case 0: /* sessions exceeded */
7464 				case -1: /* non-TCP or not L2 aggregatable */
7465 				case 5: /*
7466 					 * First pkt in session not
7467 					 * L3/L4 aggregatable
7468 					 */
7469 					break;
7470 				default:
7471 					DBG_PRINT(ERR_DBG,
7472 						  "%s: Samadhana!!\n",
7473 						  __func__);
7474 					BUG();
7475 				}
7476 			}
7477 		} else {
7478 			/*
7479 			 * Packet with erroneous checksum, let the
7480 			 * upper layers deal with it.
7481 			 */
7482 			skb_checksum_none_assert(skb);
7483 		}
7484 	} else
7485 		skb_checksum_none_assert(skb);
7486 
7487 	swstats->mem_freed += skb->truesize;
7488 send_up:
7489 	skb_record_rx_queue(skb, ring_no);
7490 	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7491 aggregate:
7492 	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7493 	return SUCCESS;
7494 }
7495 
7496 /**
7497  *  s2io_link - stops/starts the Tx queue.
7498  *  @sp : private member of the device structure, which is a pointer to the
7499  *  s2io_nic structure.
 *  @link : indicates whether link is UP/DOWN.
 *  Description:
 *  This function stops/starts the Tx queue depending on whether the link
 *  status of the NIC is down or up. This is called by the Alarm
7504  *  interrupt handler whenever a link change interrupt comes up.
7505  *  Return value:
7506  *  void.
7507  */
7508 
7509 static void s2io_link(struct s2io_nic *sp, int link)
7510 {
7511 	struct net_device *dev = sp->dev;
7512 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7513 
7514 	if (link != sp->last_link_state) {
7515 		init_tti(sp, link);
7516 		if (link == LINK_DOWN) {
7517 			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7518 			s2io_stop_all_tx_queue(sp);
7519 			netif_carrier_off(dev);
7520 			if (swstats->link_up_cnt)
7521 				swstats->link_up_time =
7522 					jiffies - sp->start_time;
7523 			swstats->link_down_cnt++;
7524 		} else {
7525 			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7526 			if (swstats->link_down_cnt)
7527 				swstats->link_down_time =
7528 					jiffies - sp->start_time;
7529 			swstats->link_up_cnt++;
7530 			netif_carrier_on(dev);
7531 			s2io_wake_all_tx_queue(sp);
7532 		}
7533 	}
7534 	sp->last_link_state = link;
7535 	sp->start_time = jiffies;
7536 }
7537 
7538 /**
 *  s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
7540  *  @sp : private member of the device structure, which is a pointer to the
7541  *  s2io_nic structure.
7542  *  Description:
7543  *  This function initializes a few of the PCI and PCI-X configuration registers
7544  *  with recommended values.
7545  *  Return value:
7546  *  void
7547  */
7548 
7549 static void s2io_init_pci(struct s2io_nic *sp)
7550 {
7551 	u16 pci_cmd = 0, pcix_cmd = 0;
7552 
7553 	/* Enable Data Parity Error Recovery in PCI-X command register. */
7554 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7555 			     &(pcix_cmd));
7556 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7557 			      (pcix_cmd | 1));
7558 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7559 			     &(pcix_cmd));
7560 
7561 	/* Set the PErr Response bit in PCI command register. */
7562 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7563 	pci_write_config_word(sp->pdev, PCI_COMMAND,
7564 			      (pci_cmd | PCI_COMMAND_PARITY));
7565 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7566 }
7567 
7568 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7569 			    u8 *dev_multiq)
7570 {
7571 	int i;
7572 
7573 	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7574 		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7575 			  "(%d) not supported\n", tx_fifo_num);
7576 
7577 		if (tx_fifo_num < 1)
7578 			tx_fifo_num = 1;
7579 		else
7580 			tx_fifo_num = MAX_TX_FIFOS;
7581 
7582 		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7583 	}
7584 
7585 	if (multiq)
7586 		*dev_multiq = multiq;
7587 
7588 	if (tx_steering_type && (1 == tx_fifo_num)) {
7589 		if (tx_steering_type != TX_DEFAULT_STEERING)
7590 			DBG_PRINT(ERR_DBG,
7591 				  "Tx steering is not supported with "
7592 				  "one fifo. Disabling Tx steering.\n");
7593 		tx_steering_type = NO_STEERING;
7594 	}
7595 
7596 	if ((tx_steering_type < NO_STEERING) ||
7597 	    (tx_steering_type > TX_DEFAULT_STEERING)) {
7598 		DBG_PRINT(ERR_DBG,
7599 			  "Requested transmit steering not supported\n");
7600 		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7601 		tx_steering_type = NO_STEERING;
7602 	}
7603 
7604 	if (rx_ring_num > MAX_RX_RINGS) {
7605 		DBG_PRINT(ERR_DBG,
7606 			  "Requested number of rx rings not supported\n");
7607 		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7608 			  MAX_RX_RINGS);
7609 		rx_ring_num = MAX_RX_RINGS;
7610 	}
7611 
7612 	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7613 		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7614 			  "Defaulting to INTA\n");
7615 		*dev_intr_type = INTA;
7616 	}
7617 
7618 	if ((*dev_intr_type == MSI_X) &&
7619 	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7620 	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7621 		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7622 			  "Defaulting to INTA\n");
7623 		*dev_intr_type = INTA;
7624 	}
7625 
7626 	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7627 		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7628 		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7629 		rx_ring_mode = 1;
7630 	}
7631 
7632 	for (i = 0; i < MAX_RX_RINGS; i++)
7633 		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7634 			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7635 				  "supported\nDefaulting to %d\n",
7636 				  MAX_RX_BLOCKS_PER_RING);
7637 			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7638 		}
7639 
7640 	return SUCCESS;
7641 }
7642 
7643 /**
 * rts_ds_steer - Receive traffic steering based on IPv4 TOS or IPv6
 * Traffic Class.
 * @nic: device private variable
 * @ds_codepoint: the DS codepoint (0-63) to steer on
 * @ring: the receive ring to which matching frames are steered
 * Description: The function configures the receive steering for the
 * given DS codepoint to the desired receive ring.
 * Return Value: SUCCESS on success, FAILURE if the codepoint is out of
 * range or the steering command does not complete.
 */
7652 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7653 {
7654 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
7655 	register u64 val64 = 0;
7656 
7657 	if (ds_codepoint > 63)
7658 		return FAILURE;
7659 
7660 	val64 = RTS_DS_MEM_DATA(ring);
7661 	writeq(val64, &bar0->rts_ds_mem_data);
7662 
7663 	val64 = RTS_DS_MEM_CTRL_WE |
7664 		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7665 		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7666 
7667 	writeq(val64, &bar0->rts_ds_mem_ctrl);
7668 
7669 	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7670 				     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7671 				     S2IO_BIT_RESET);
7672 }
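
/*
 * Usage sketch for the helper above (illustrative only; the codepoint and
 * ring values here are hypothetical, the driver does not issue this call
 * by itself): steer frames carrying DSCP 0x2e (Expedited Forwarding) to
 * receive ring 1.
 *
 *	if (rts_ds_steer(nic, 0x2e, 1) != SUCCESS)
 *		DBG_PRINT(ERR_DBG, "Failed to steer DSCP 0x2e to ring 1\n");
 */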
7673 
7674 static const struct net_device_ops s2io_netdev_ops = {
7675 	.ndo_open	        = s2io_open,
7676 	.ndo_stop	        = s2io_close,
7677 	.ndo_get_stats	        = s2io_get_stats,
7678 	.ndo_start_xmit    	= s2io_xmit,
7679 	.ndo_validate_addr	= eth_validate_addr,
7680 	.ndo_set_rx_mode	= s2io_set_multicast,
7681 	.ndo_do_ioctl	   	= s2io_ioctl,
7682 	.ndo_set_mac_address    = s2io_set_mac_addr,
7683 	.ndo_change_mtu	   	= s2io_change_mtu,
7684 	.ndo_set_features	= s2io_set_features,
7685 	.ndo_tx_timeout	   	= s2io_tx_watchdog,
7686 #ifdef CONFIG_NET_POLL_CONTROLLER
7687 	.ndo_poll_controller    = s2io_netpoll,
7688 #endif
7689 };
7690 
7691 /**
 *  s2io_init_nic - Initialization of the adapter.
 *  @pdev : structure containing the PCI related information of the device.
 *  @pre : the entry of s2io_tbl that matched this device.
 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
 *  All OS related initialization, including memory and device structure
 *  setup and initialization of the device private data, is done here. The
 *  swapper control register is also initialized to enable reads and writes
 *  into the I/O registers of the device.
 *  Return value:
 *  returns 0 on success and negative on failure.
7703  */
7704 
7705 static int
7706 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7707 {
7708 	struct s2io_nic *sp;
7709 	struct net_device *dev;
7710 	int i, j, ret;
7711 	int dma_flag = false;
7712 	u32 mac_up, mac_down;
7713 	u64 val64 = 0, tmp64 = 0;
7714 	struct XENA_dev_config __iomem *bar0 = NULL;
7715 	u16 subid;
7716 	struct config_param *config;
7717 	struct mac_info *mac_control;
7718 	int mode;
7719 	u8 dev_intr_type = intr_type;
7720 	u8 dev_multiq = 0;
7721 
7722 	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7723 	if (ret)
7724 		return ret;
7725 
7726 	ret = pci_enable_device(pdev);
7727 	if (ret) {
7728 		DBG_PRINT(ERR_DBG,
7729 			  "%s: pci_enable_device failed\n", __func__);
7730 		return ret;
7731 	}
7732 
7733 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7734 		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7735 		dma_flag = true;
7736 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
7737 			DBG_PRINT(ERR_DBG,
7738 				  "Unable to obtain 64bit DMA "
7739 				  "for consistent allocations\n");
7740 			pci_disable_device(pdev);
7741 			return -ENOMEM;
7742 		}
7743 	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
7744 		DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7745 	} else {
7746 		pci_disable_device(pdev);
7747 		return -ENOMEM;
7748 	}
7749 	ret = pci_request_regions(pdev, s2io_driver_name);
7750 	if (ret) {
7751 		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7752 			  __func__, ret);
7753 		pci_disable_device(pdev);
7754 		return -ENODEV;
7755 	}
7756 	if (dev_multiq)
7757 		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7758 	else
7759 		dev = alloc_etherdev(sizeof(struct s2io_nic));
7760 	if (dev == NULL) {
7761 		pci_disable_device(pdev);
7762 		pci_release_regions(pdev);
7763 		return -ENODEV;
7764 	}
7765 
7766 	pci_set_master(pdev);
7767 	pci_set_drvdata(pdev, dev);
7768 	SET_NETDEV_DEV(dev, &pdev->dev);
7769 
7770 	/*  Private member variable initialized to s2io NIC structure */
7771 	sp = netdev_priv(dev);
7772 	sp->dev = dev;
7773 	sp->pdev = pdev;
7774 	sp->high_dma_flag = dma_flag;
7775 	sp->device_enabled_once = false;
7776 	if (rx_ring_mode == 1)
7777 		sp->rxd_mode = RXD_MODE_1;
7778 	if (rx_ring_mode == 2)
7779 		sp->rxd_mode = RXD_MODE_3B;
7780 
7781 	sp->config.intr_type = dev_intr_type;
7782 
7783 	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7784 	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7785 		sp->device_type = XFRAME_II_DEVICE;
7786 	else
		sp->device_type = XFRAME_I_DEVICE;

7790 	/* Initialize some PCI/PCI-X fields of the NIC. */
7791 	s2io_init_pci(sp);
7792 
7793 	/*
7794 	 * Setting the device configuration parameters.
7795 	 * Most of these parameters can be specified by the user during
7796 	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
7798 	 * are initialized with default values.
7799 	 */
7800 	config = &sp->config;
7801 	mac_control = &sp->mac_control;
7802 
7803 	config->napi = napi;
7804 	config->tx_steering_type = tx_steering_type;
7805 
7806 	/* Tx side parameters. */
7807 	if (config->tx_steering_type == TX_PRIORITY_STEERING)
7808 		config->tx_fifo_num = MAX_TX_FIFOS;
7809 	else
7810 		config->tx_fifo_num = tx_fifo_num;
7811 
7812 	/* Initialize the fifos used for tx steering */
7813 	if (config->tx_fifo_num < 5) {
7814 		if (config->tx_fifo_num  == 1)
7815 			sp->total_tcp_fifos = 1;
7816 		else
7817 			sp->total_tcp_fifos = config->tx_fifo_num - 1;
7818 		sp->udp_fifo_idx = config->tx_fifo_num - 1;
7819 		sp->total_udp_fifos = 1;
7820 		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7821 	} else {
7822 		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7823 				       FIFO_OTHER_MAX_NUM);
7824 		sp->udp_fifo_idx = sp->total_tcp_fifos;
7825 		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7826 		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7827 	}
7828 
7829 	config->multiq = dev_multiq;
7830 	for (i = 0; i < config->tx_fifo_num; i++) {
7831 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7832 
7833 		tx_cfg->fifo_len = tx_fifo_len[i];
7834 		tx_cfg->fifo_priority = i;
7835 	}
7836 
7837 	/* mapping the QoS priority to the configured fifos */
7838 	for (i = 0; i < MAX_TX_FIFOS; i++)
7839 		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7840 
7841 	/* map the hashing selector table to the configured fifos */
7842 	for (i = 0; i < config->tx_fifo_num; i++)
		sp->fifo_selector[i] = fifo_selector[i];

7846 	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7847 	for (i = 0; i < config->tx_fifo_num; i++) {
7848 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7849 
7850 		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7851 		if (tx_cfg->fifo_len < 65) {
7852 			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7853 			break;
7854 		}
7855 	}
7856 	/* + 2 because one Txd for skb->data and one Txd for UFO */
7857 	config->max_txds = MAX_SKB_FRAGS + 2;
7858 
7859 	/* Rx side parameters. */
7860 	config->rx_ring_num = rx_ring_num;
7861 	for (i = 0; i < config->rx_ring_num; i++) {
7862 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7863 		struct ring_info *ring = &mac_control->rings[i];
7864 
7865 		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7866 		rx_cfg->ring_priority = i;
7867 		ring->rx_bufs_left = 0;
7868 		ring->rxd_mode = sp->rxd_mode;
7869 		ring->rxd_count = rxd_count[sp->rxd_mode];
7870 		ring->pdev = sp->pdev;
7871 		ring->dev = sp->dev;
7872 	}
7873 
7874 	for (i = 0; i < rx_ring_num; i++) {
7875 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7876 
7877 		rx_cfg->ring_org = RING_ORG_BUFF1;
7878 		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7879 	}
7880 
7881 	/*  Setting Mac Control parameters */
7882 	mac_control->rmac_pause_time = rmac_pause_time;
7883 	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;

7887 	/*  initialize the shared memory used by the NIC and the host */
7888 	if (init_shared_mem(sp)) {
7889 		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7890 		ret = -ENOMEM;
7891 		goto mem_alloc_failed;
7892 	}
7893 
7894 	sp->bar0 = pci_ioremap_bar(pdev, 0);
7895 	if (!sp->bar0) {
7896 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7897 			  dev->name);
7898 		ret = -ENOMEM;
7899 		goto bar0_remap_failed;
7900 	}
7901 
7902 	sp->bar1 = pci_ioremap_bar(pdev, 2);
7903 	if (!sp->bar1) {
7904 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7905 			  dev->name);
7906 		ret = -ENOMEM;
7907 		goto bar1_remap_failed;
7908 	}
7909 
7910 	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++)
		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7914 
7915 	/*  Driver entry points */
7916 	dev->netdev_ops = &s2io_netdev_ops;
7917 	dev->ethtool_ops = &netdev_ethtool_ops;
7918 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7919 		NETIF_F_TSO | NETIF_F_TSO6 |
7920 		NETIF_F_RXCSUM | NETIF_F_LRO;
7921 	dev->features |= dev->hw_features |
7922 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
7923 	if (sp->device_type & XFRAME_II_DEVICE) {
7924 		dev->hw_features |= NETIF_F_UFO;
7925 		if (ufo)
7926 			dev->features |= NETIF_F_UFO;
7927 	}
7928 	if (sp->high_dma_flag == true)
7929 		dev->features |= NETIF_F_HIGHDMA;
7930 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7931 	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7932 	INIT_WORK(&sp->set_link_task, s2io_set_link);
7933 
7934 	pci_save_state(sp->pdev);
7935 
7936 	/* Setting swapper control on the NIC, for proper reset operation */
7937 	if (s2io_set_swapper(sp)) {
7938 		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7939 			  dev->name);
7940 		ret = -EAGAIN;
7941 		goto set_swap_failed;
7942 	}
7943 
	/* Verify whether the Herc works in the slot it's placed into */
7945 	if (sp->device_type & XFRAME_II_DEVICE) {
7946 		mode = s2io_verify_pci_mode(sp);
7947 		if (mode < 0) {
7948 			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7949 				  __func__);
7950 			ret = -EBADSLT;
7951 			goto set_swap_failed;
7952 		}
7953 	}
7954 
7955 	if (sp->config.intr_type == MSI_X) {
7956 		sp->num_entries = config->rx_ring_num + 1;
7957 		ret = s2io_enable_msi_x(sp);
7958 
7959 		if (!ret) {
7960 			ret = s2io_test_msi(sp);
7961 			/* rollback MSI-X, will re-enable during add_isr() */
7962 			remove_msix_isr(sp);
7963 		}
		if (ret) {
7966 			DBG_PRINT(ERR_DBG,
7967 				  "MSI-X requested but failed to enable\n");
7968 			sp->config.intr_type = INTA;
7969 		}
7970 	}
7971 
7972 	if (config->intr_type ==  MSI_X) {
7973 		for (i = 0; i < config->rx_ring_num ; i++) {
7974 			struct ring_info *ring = &mac_control->rings[i];
7975 
7976 			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7977 		}
7978 	} else {
7979 		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7980 	}
7981 
7982 	/* Not needed for Herc */
7983 	if (sp->device_type & XFRAME_I_DEVICE) {
7984 		/*
7985 		 * Fix for all "FFs" MAC address problems observed on
7986 		 * Alpha platforms
7987 		 */
7988 		fix_mac_address(sp);
7989 		s2io_reset(sp);
7990 	}
7991 
7992 	/*
7993 	 * MAC address initialization.
7994 	 * For now only one mac address will be read and used.
7995 	 */
7996 	bar0 = sp->bar0;
7997 	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7998 		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7999 	writeq(val64, &bar0->rmac_addr_cmd_mem);
8000 	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8001 			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
8002 			      S2IO_BIT_RESET);
8003 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
8004 	mac_down = (u32)tmp64;
8005 	mac_up = (u32) (tmp64 >> 32);
8006 
8007 	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8008 	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8009 	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8010 	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8011 	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8012 	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
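	/*
	 * Worked example of the unpacking above: if the CAM returned
	 * tmp64 = 0x0011223344550000ULL, then mac_up = 0x00112233 and
	 * mac_down = 0x44550000, yielding the station address
	 * 00:11:22:33:44:55 in def_mac_addr[0].mac_addr[0..5].
	 */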
8013 
8014 	/*  Set the factory defined MAC address initially   */
8015 	dev->addr_len = ETH_ALEN;
8016 	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8017 
	/* initialize the multicast & unicast MAC address limits and offsets */
8019 	if (sp->device_type == XFRAME_I_DEVICE) {
8020 		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8021 		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8022 		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8023 	} else if (sp->device_type == XFRAME_II_DEVICE) {
8024 		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8025 		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8026 		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8027 	}
8028 
8029 	/* store mac addresses from CAM to s2io_nic structure */
8030 	do_s2io_store_unicast_mc(sp);
8031 
	/* Configure MSI-X vectors: one per configured Rx ring plus one alarm */
8033 	if ((sp->device_type == XFRAME_II_DEVICE) &&
8034 	    (config->intr_type == MSI_X))
8035 		sp->num_entries = config->rx_ring_num + 1;
8036 
8037 	/* Store the values of the MSIX table in the s2io_nic structure */
8038 	store_xmsi_data(sp);
8039 	/* reset Nic and bring it to known state */
8040 	s2io_reset(sp);
8041 
8042 	/*
8043 	 * Initialize link state flags
8044 	 * and the card state parameter
8045 	 */
8046 	sp->state = 0;
8047 
8048 	/* Initialize spinlocks */
8049 	for (i = 0; i < sp->config.tx_fifo_num; i++) {
8050 		struct fifo_info *fifo = &mac_control->fifos[i];
8051 
8052 		spin_lock_init(&fifo->tx_lock);
8053 	}
8054 
8055 	/*
8056 	 * SXE-002: Configure link and activity LED to init state
8057 	 * on driver load.
8058 	 */
8059 	subid = sp->pdev->subsystem_device;
8060 	if ((subid & 0xFF) >= 0x07) {
8061 		val64 = readq(&bar0->gpio_control);
8062 		val64 |= 0x0000800000000000ULL;
8063 		writeq(val64, &bar0->gpio_control);
8064 		val64 = 0x0411040400000000ULL;
8065 		writeq(val64, (void __iomem *)bar0 + 0x2700);
8066 		val64 = readq(&bar0->gpio_control);
8067 	}
8068 
8069 	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
8070 
8071 	if (register_netdev(dev)) {
8072 		DBG_PRINT(ERR_DBG, "Device registration failed\n");
8073 		ret = -ENODEV;
8074 		goto register_failed;
8075 	}
8076 	s2io_vpd_read(sp);
8077 	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8078 	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8079 		  sp->product_name, pdev->revision);
8080 	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8081 		  s2io_driver_version);
8082 	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8083 	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8084 	if (sp->device_type & XFRAME_II_DEVICE) {
8085 		mode = s2io_print_pci_mode(sp);
8086 		if (mode < 0) {
8087 			ret = -EBADSLT;
8088 			unregister_netdev(dev);
8089 			goto set_swap_failed;
8090 		}
8091 	}
8092 	switch (sp->rxd_mode) {
8093 	case RXD_MODE_1:
8094 		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8095 			  dev->name);
8096 		break;
8097 	case RXD_MODE_3B:
8098 		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8099 			  dev->name);
8100 		break;
8101 	}
8102 
8103 	switch (sp->config.napi) {
8104 	case 0:
8105 		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8106 		break;
8107 	case 1:
8108 		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8109 		break;
8110 	}
8111 
8112 	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8113 		  sp->config.tx_fifo_num);
8114 
8115 	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8116 		  sp->config.rx_ring_num);
8117 
8118 	switch (sp->config.intr_type) {
8119 	case INTA:
8120 		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8121 		break;
8122 	case MSI_X:
8123 		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8124 		break;
8125 	}
8126 	if (sp->config.multiq) {
8127 		for (i = 0; i < sp->config.tx_fifo_num; i++) {
8128 			struct fifo_info *fifo = &mac_control->fifos[i];
8129 
8130 			fifo->multiq = config->multiq;
8131 		}
8132 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8133 			  dev->name);
	} else {
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
			  dev->name);
	}
8137 
8138 	switch (sp->config.tx_steering_type) {
8139 	case NO_STEERING:
8140 		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8141 			  dev->name);
8142 		break;
8143 	case TX_PRIORITY_STEERING:
8144 		DBG_PRINT(ERR_DBG,
8145 			  "%s: Priority steering enabled for transmit\n",
8146 			  dev->name);
8147 		break;
8148 	case TX_DEFAULT_STEERING:
8149 		DBG_PRINT(ERR_DBG,
8150 			  "%s: Default steering enabled for transmit\n",
8151 			  dev->name);
8152 	}
8153 
8154 	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8155 		  dev->name);
8156 	if (ufo)
8157 		DBG_PRINT(ERR_DBG,
8158 			  "%s: UDP Fragmentation Offload(UFO) enabled\n",
8159 			  dev->name);
8160 	/* Initialize device name */
8161 	snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8162 		 sp->product_name);
8163 
8164 	if (vlan_tag_strip)
8165 		sp->vlan_strip_flag = 1;
8166 	else
8167 		sp->vlan_strip_flag = 0;
8168 
8169 	/*
8170 	 * Make Link state as off at this point, when the Link change
8171 	 * interrupt comes the state will be automatically changed to
8172 	 * the right state.
8173 	 */
8174 	netif_carrier_off(dev);
8175 
8176 	return 0;
8177 
8178 register_failed:
8179 set_swap_failed:
8180 	iounmap(sp->bar1);
8181 bar1_remap_failed:
8182 	iounmap(sp->bar0);
8183 bar0_remap_failed:
8184 mem_alloc_failed:
8185 	free_shared_mem(sp);
8186 	pci_disable_device(pdev);
8187 	pci_release_regions(pdev);
8188 	free_netdev(dev);
8189 
8190 	return ret;
8191 }
8192 
8193 /**
8194  * s2io_rem_nic - Free the PCI device
8195  * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
 * be in response to a hot-plug event or when the driver is to be removed
 * from memory.
8200  */
8201 
8202 static void s2io_rem_nic(struct pci_dev *pdev)
8203 {
8204 	struct net_device *dev = pci_get_drvdata(pdev);
8205 	struct s2io_nic *sp;
8206 
8207 	if (dev == NULL) {
8208 		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8209 		return;
8210 	}
8211 
8212 	sp = netdev_priv(dev);
8213 
8214 	cancel_work_sync(&sp->rst_timer_task);
8215 	cancel_work_sync(&sp->set_link_task);
8216 
8217 	unregister_netdev(dev);
8218 
8219 	free_shared_mem(sp);
8220 	iounmap(sp->bar0);
8221 	iounmap(sp->bar1);
8222 	pci_release_regions(pdev);
8223 	free_netdev(dev);
8224 	pci_disable_device(pdev);
8225 }
8226 
8227 /**
8228  * s2io_starter - Entry point for the driver
8229  * Description: This function is the entry point for the driver. It verifies
 * Description: This function is the entry point for the driver. It
 * registers the driver with the PCI subsystem.
8232 
8233 static int __init s2io_starter(void)
8234 {
8235 	return pci_register_driver(&s2io_driver);
8236 }
8237 
8238 /**
8239  * s2io_closer - Cleanup routine for the driver
8240  * Description: This function is the cleanup routine for the driver. It
8241  * unregisters the driver.
8242  */
8243 
8244 static __exit void s2io_closer(void)
8245 {
8246 	pci_unregister_driver(&s2io_driver);
8247 	DBG_PRINT(INIT_DBG, "cleanup done\n");
8248 }
8249 
8250 module_init(s2io_starter);
8251 module_exit(s2io_closer);
8252 
8253 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8254 				struct tcphdr **tcp, struct RxD_t *rxdp,
8255 				struct s2io_nic *sp)
8256 {
8257 	int ip_off;
8258 	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8259 
8260 	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8261 		DBG_PRINT(INIT_DBG,
8262 			  "%s: Non-TCP frames not supported for LRO\n",
8263 			  __func__);
8264 		return -1;
8265 	}
8266 
8267 	/* Checking for DIX type or DIX type with VLAN */
8268 	if ((l2_type == 0) || (l2_type == 4)) {
8269 		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8270 		/*
8271 		 * If vlan stripping is disabled and the frame is VLAN tagged,
8272 		 * shift the offset by the VLAN header size bytes.
8273 		 */
8274 		if ((!sp->vlan_strip_flag) &&
8275 		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8276 			ip_off += HEADER_VLAN_SIZE;
8277 	} else {
8278 		/* LLC, SNAP etc are considered non-mergeable */
8279 		return -1;
8280 	}
8281 
8282 	*ip = (struct iphdr *)(buffer + ip_off);
8283 	ip_len = (u8)((*ip)->ihl);
8284 	ip_len <<= 2;
8285 	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8286 
8287 	return 0;
8288 }
8289 
8290 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8291 				  struct tcphdr *tcp)
8292 {
8293 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8294 	if ((lro->iph->saddr != ip->saddr) ||
8295 	    (lro->iph->daddr != ip->daddr) ||
8296 	    (lro->tcph->source != tcp->source) ||
8297 	    (lro->tcph->dest != tcp->dest))
8298 		return -1;
8299 	return 0;
8300 }
8301 
8302 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8303 {
8304 	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8305 }
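
/*
 * Example for the helper above: an IP datagram with tot_len == 1500, no IP
 * options (ihl == 5, a 20-byte header) and a TCP header carrying timestamps
 * (doff == 8, 32 bytes) has a TCP payload of 1500 - 20 - 32 = 1448 bytes.
 */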
8306 
8307 static void initiate_new_session(struct lro *lro, u8 *l2h,
8308 				 struct iphdr *ip, struct tcphdr *tcp,
8309 				 u32 tcp_pyld_len, u16 vlan_tag)
8310 {
8311 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8312 	lro->l2h = l2h;
8313 	lro->iph = ip;
8314 	lro->tcph = tcp;
8315 	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8316 	lro->tcp_ack = tcp->ack_seq;
8317 	lro->sg_num = 1;
8318 	lro->total_len = ntohs(ip->tot_len);
8319 	lro->frags_len = 0;
8320 	lro->vlan_tag = vlan_tag;
8321 	/*
8322 	 * Check if we saw TCP timestamp.
8323 	 * Other consistency checks have already been done.
8324 	 */
8325 	if (tcp->doff == 8) {
8326 		__be32 *ptr;
8327 		ptr = (__be32 *)(tcp+1);
8328 		lro->saw_ts = 1;
8329 		lro->cur_tsval = ntohl(*(ptr+1));
8330 		lro->cur_tsecr = *(ptr+2);
8331 	}
8332 	lro->in_use = 1;
8333 }
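
/*
 * Note on the timestamp parsing above: with doff == 8 the TCP header
 * carries exactly 12 option bytes, which verify_l3_l4_lro_capable() has
 * already constrained to NOP, NOP, TIMESTAMP (kind 8, len 10), tsval (4
 * bytes) and tsecr (4 bytes). Viewed as __be32 words starting at
 * (tcp + 1): word 0 holds the two NOPs plus kind/len, word 1 is tsval and
 * word 2 is tsecr, hence the *(ptr+1) and *(ptr+2) accesses.
 */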
8334 
8335 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8336 {
8337 	struct iphdr *ip = lro->iph;
8338 	struct tcphdr *tcp = lro->tcph;
8339 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8340 
8341 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8342 
8343 	/* Update L3 header */
8344 	csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8345 	ip->tot_len = htons(lro->total_len);
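	/*
	 * csum_replace2() above folds the 16-bit difference between the old
	 * and new tot_len into ip->check incrementally (RFC 1624 style), so
	 * the full IP header checksum need not be recomputed.
	 */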
8346 
8347 	/* Update L4 header */
8348 	tcp->ack_seq = lro->tcp_ack;
8349 	tcp->window = lro->window;
8350 
8351 	/* Update tsecr field if this session has timestamps enabled */
8352 	if (lro->saw_ts) {
8353 		__be32 *ptr = (__be32 *)(tcp + 1);
8354 		*(ptr+2) = lro->cur_tsecr;
8355 	}
8356 
8357 	/* Update counters required for calculation of
8358 	 * average no. of packets aggregated.
8359 	 */
8360 	swstats->sum_avg_pkts_aggregated += lro->sg_num;
8361 	swstats->num_aggregations++;
8362 }
8363 
8364 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8365 			     struct tcphdr *tcp, u32 l4_pyld)
8366 {
8367 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8368 	lro->total_len += l4_pyld;
8369 	lro->frags_len += l4_pyld;
8370 	lro->tcp_next_seq += l4_pyld;
8371 	lro->sg_num++;
8372 
	/*
	 * Update ACK seq no. and window advertisement (from this pkt)
	 * in the LRO object.
	 */
8374 	lro->tcp_ack = tcp->ack_seq;
8375 	lro->window = tcp->window;
8376 
8377 	if (lro->saw_ts) {
8378 		__be32 *ptr;
8379 		/* Update tsecr and tsval from this packet */
8380 		ptr = (__be32 *)(tcp+1);
8381 		lro->cur_tsval = ntohl(*(ptr+1));
8382 		lro->cur_tsecr = *(ptr + 2);
8383 	}
8384 }
8385 
8386 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8387 				    struct tcphdr *tcp, u32 tcp_pyld_len)
8388 {
8389 	u8 *ptr;
8390 
8391 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8392 
8393 	if (!tcp_pyld_len) {
8394 		/* Runt frame or a pure ack */
8395 		return -1;
8396 	}
8397 
8398 	if (ip->ihl != 5) /* IP has options */
8399 		return -1;
8400 
8401 	/* If we see CE codepoint in IP header, packet is not mergeable */
8402 	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8403 		return -1;
8404 
8405 	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8406 	if (tcp->urg || tcp->psh || tcp->rst ||
8407 	    tcp->syn || tcp->fin ||
8408 	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently we recognize only the ACK control bit; any
		 * other control flag being set results in flushing the
		 * LRO session.
		 */
8414 		return -1;
8415 	}
8416 
8417 	/*
8418 	 * Allow only one TCP timestamp option. Don't aggregate if
8419 	 * any other options are detected.
8420 	 */
8421 	if (tcp->doff != 5 && tcp->doff != 8)
8422 		return -1;
8423 
8424 	if (tcp->doff == 8) {
8425 		ptr = (u8 *)(tcp + 1);
8426 		while (*ptr == TCPOPT_NOP)
8427 			ptr++;
8428 		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8429 			return -1;
8430 
8431 		/* Ensure timestamp value increases monotonically */
8432 		if (l_lro)
8433 			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8434 				return -1;
8435 
8436 		/* timestamp echo reply should be non-zero */
8437 		if (*((__be32 *)(ptr+6)) == 0)
8438 			return -1;
8439 	}
8440 
8441 	return 0;
8442 }
8443 
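/*
 * s2io_club_tcp_session - LRO decision engine for a received TCP segment.
 * Return codes consumed by rx_osm_handler():
 *	-1 - frame is not TCP or not L2 aggregatable
 *	 0 - all LRO sessions are already in use
 *	 1 - segment appended to an existing session
 *	 2 - flush the session (and send this segment up separately)
 *	 3 - a new session was initiated with this segment
 *	 4 - segment appended and the session must now be flushed
 *	 5 - first packet of a session is not L3/L4 aggregatable
 */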
8444 static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
8445 				 u8 **tcp, u32 *tcp_len, struct lro **lro,
8446 				 struct RxD_t *rxdp, struct s2io_nic *sp)
8447 {
8448 	struct iphdr *ip;
8449 	struct tcphdr *tcph;
8450 	int ret = 0, i;
8451 	u16 vlan_tag = 0;
8452 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8453 
8454 	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8455 				   rxdp, sp);
8456 	if (ret)
8457 		return ret;
8458 
8459 	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
8460 
8461 	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8462 	tcph = (struct tcphdr *)*tcp;
8463 	*tcp_len = get_l4_pyld_length(ip, tcph);
8464 	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8465 		struct lro *l_lro = &ring_data->lro0_n[i];
8466 		if (l_lro->in_use) {
8467 			if (check_for_socket_match(l_lro, ip, tcph))
8468 				continue;
8469 			/* Sock pair matched */
8470 			*lro = l_lro;
8471 
8472 			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8473 				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
8474 					  "expected 0x%x, actual 0x%x\n",
8475 					  __func__,
8476 					  (*lro)->tcp_next_seq,
8477 					  ntohl(tcph->seq));
8478 
8479 				swstats->outof_sequence_pkts++;
8480 				ret = 2;
8481 				break;
8482 			}
8483 
8484 			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8485 						      *tcp_len))
8486 				ret = 1; /* Aggregate */
8487 			else
8488 				ret = 2; /* Flush both */
8489 			break;
8490 		}
8491 	}
8492 
8493 	if (ret == 0) {
8494 		/* Before searching for available LRO objects,
8495 		 * check if the pkt is L3/L4 aggregatable. If not
8496 		 * don't create new LRO session. Just send this
8497 		 * packet up.
8498 		 */
8499 		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
8500 			return 5;
8501 
8502 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8503 			struct lro *l_lro = &ring_data->lro0_n[i];
8504 			if (!(l_lro->in_use)) {
8505 				*lro = l_lro;
8506 				ret = 3; /* Begin anew */
8507 				break;
8508 			}
8509 		}
8510 	}
8511 
8512 	if (ret == 0) { /* sessions exceeded */
8513 		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8514 			  __func__);
8515 		*lro = NULL;
8516 		return ret;
8517 	}
8518 
8519 	switch (ret) {
8520 	case 3:
8521 		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8522 				     vlan_tag);
8523 		break;
8524 	case 2:
8525 		update_L3L4_header(sp, *lro);
8526 		break;
8527 	case 1:
8528 		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8529 		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8530 			update_L3L4_header(sp, *lro);
8531 			ret = 4; /* Flush the LRO */
8532 		}
8533 		break;
8534 	default:
8535 		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
		break;
	}

	return ret;
}

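/*
 * For reference, a schematic of how the Rx path is expected to act on
 * the clubbing return codes above (an illustrative sketch only; the
 * actual dispatch lives in the receive handler):
 *
 *	switch (s2io_club_tcp_session(ring, buf, &tcp, &tcp_len,
 *				      &lro, rxdp, sp)) {
 *	case 3:			// skb became the new session's parent
 *		break;
 *	case 1:			// aggregated: chain payload, hold skb
 *		lro_append_pkt(sp, lro, skb, tcp_len);
 *		break;
 *	case 4:			// aggregated and limit reached: flush
 *		lro_append_pkt(sp, lro, skb, tcp_len);
 *		queue_rx_frame(lro->parent, lro->vlan_tag);
 *		clear_lro_session(lro);
 *		break;
 *	case 2:			// flush session, pass new skb up too
 *		queue_rx_frame(lro->parent, lro->vlan_tag);
 *		clear_lro_session(lro);
 *		queue_rx_frame(skb, vlan_tag);
 *		break;
 *	default:		// 0 or 5: pass up unaggregated
 *		queue_rx_frame(skb, vlan_tag);
 *	}
 */
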
static void clear_lro_session(struct lro *lro)
{
	memset(lro, 0, sizeof(struct lro));
}

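/*
 * Hand a completed frame to the stack, restoring the VLAN tag the
 * hardware stripped, via netif_receive_skb() in NAPI (softirq) context
 * or netif_rx() otherwise.
 */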
static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
{
	struct net_device *dev = skb->dev;
	struct s2io_nic *sp = netdev_priv(dev);

	skb->protocol = eth_type_trans(skb, dev);
	if (vlan_tag && sp->vlan_strip_flag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	if (sp->config.napi)
		netif_receive_skb(skb);
	else
		netif_rx(skb);
}

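/*
 * Chain an aggregated segment onto the session's parent skb via the
 * parent's frag_list. Everything up to the TCP payload is pulled off
 * the new skb, and the parent's len/data_len/truesize accounting is
 * updated to cover it.
 */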
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
			   struct sk_buff *skb, u32 tcp_len)
{
	struct sk_buff *first = lro->parent;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	first->len += tcp_len;
	first->data_len = lro->frags_len;
	skb_pull(skb, (skb->len - tcp_len));
	if (skb_shinfo(first)->frag_list)
		lro->last_frag->next = skb;
	else
		skb_shinfo(first)->frag_list = skb;
	first->truesize += skb->truesize;
	lro->last_frag = skb;
	swstats->clubbed_frms_cnt++;
}

/**
 * s2io_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_s2io_card_down(sp, 0);
	}
	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * s2io_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by the BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	s2io_reset(sp);

	return PCI_ERS_RESULT_RECOVERED;
}

8633 
8634 /**
8635  * s2io_io_resume - called when traffic can start flowing again.
8636  * @pdev: Pointer to PCI device
8637  *
8638  * This callback is called when the error recovery driver tells
8639  * us that its OK to resume normal operation.
8640  */
8641 static void s2io_io_resume(struct pci_dev *pdev)
8642 {
8643 	struct net_device *netdev = pci_get_drvdata(pdev);
8644 	struct s2io_nic *sp = netdev_priv(netdev);
8645 
8646 	if (netif_running(netdev)) {
8647 		if (s2io_card_up(sp)) {
8648 			pr_err("Can't bring device back up after reset.\n");
8649 			return;
8650 		}
8651 
8652 		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8653 			s2io_card_down(sp);
8654 			pr_err("Can't restore mac addr after reset.\n");
8655 			return;
8656 		}
8657 	}
8658 
8659 	netif_device_attach(netdev);
8660 	netif_tx_wake_all_queues(netdev);
8661 }
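
/*
 * The three callbacks above implement the PCI error-recovery contract.
 * They are hooked into the PCI core through a struct pci_error_handlers
 * table referenced from the driver's struct pci_driver, along the lines
 * of:
 *
 *	static const struct pci_error_handlers s2io_err_handler = {
 *		.error_detected	= s2io_io_error_detected,
 *		.slot_reset	= s2io_io_slot_reset,
 *		.resume		= s2io_io_resume,
 *	};
 */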