1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2010 Exar Corp.
4  *
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
 * Jeff Garzik		: For pointing out the improper error condition
 *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watchdog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
19  * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
20  *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all the code parts that were
 *			  deprecated and also for the style-related comments.
23  * Grant Grundler	: For helping me get rid of some Architecture
24  *			  dependent code.
25  * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
26  *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 * in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *		values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs used in the driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 * Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0 (INTA)
 *     or 2 (MSI_X). The default value is '2' (MSI_X).
 * lro_max_pkts: This parameter defines the maximum number of packets that
 *     can be aggregated into a single large packet.
 * napi: This parameter is used to enable/disable NAPI (polling Rx).
 *     Possible values are '1' for enable and '0' for disable. Default is '1'.
 * vlan_tag_strip: This can be used to enable or disable VLAN tag stripping.
 *                 Possible values are '1' for enable and '0' for disable.
 *                 Default is '2', which means disable in promiscuous mode
 *                 and enable in non-promiscuous mode.
 * multiq: This parameter is used to enable/disable MULTIQUEUE support.
 *      Possible values are '1' for enable and '0' for disable. Default is '0'.
51  ************************************************************************/
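
/*
 * Example (hypothetical values, not defaults): these parameters can be
 * set at module load time, e.g.
 *
 *	modprobe s2io tx_fifo_num=4 rx_ring_num=2 intr_type=2 napi=1
 *
 * Array parameters such as tx_fifo_len take comma-separated lists,
 * one entry per FIFO:
 *
 *	modprobe s2io tx_fifo_num=4 tx_fifo_len=4096,1024,1024,1024
 */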
52 
53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
54 
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/mdio.h>
65 #include <linux/skbuff.h>
66 #include <linux/init.h>
67 #include <linux/delay.h>
68 #include <linux/stddef.h>
69 #include <linux/ioctl.h>
70 #include <linux/timex.h>
71 #include <linux/ethtool.h>
72 #include <linux/workqueue.h>
73 #include <linux/if_vlan.h>
74 #include <linux/ip.h>
75 #include <linux/tcp.h>
76 #include <linux/uaccess.h>
77 #include <linux/io.h>
78 #include <linux/io-64-nonatomic-lo-hi.h>
79 #include <linux/slab.h>
80 #include <linux/prefetch.h>
81 #include <net/tcp.h>
82 #include <net/checksum.h>
83 
84 #include <asm/div64.h>
85 #include <asm/irq.h>
86 
87 /* local include */
88 #include "s2io.h"
89 #include "s2io-regs.h"
90 
91 #define DRV_VERSION "2.0.26.28"
92 
93 /* S2io Driver name & version. */
94 static const char s2io_driver_name[] = "Neterion";
95 static const char s2io_driver_version[] = DRV_VERSION;
96 
97 static const int rxd_size[2] = {32, 48};
98 static const int rxd_count[2] = {127, 85};
99 
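/*
 * Returns true when the adapter no longer owns the RxD (RXD_OWN_XENA
 * is clear) and the marker field in Control_2 does not match
 * THE_RXD_MARK.
 */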
100 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
101 {
102 	int ret;
103 
104 	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
105 	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
106 
107 	return ret;
108 }
109 
/*
 * Cards with the following subsystem_ids have a link state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 */
115 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
116 	(dev_type == XFRAME_I_DEVICE) ?					\
117 	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
118 	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
119 
120 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
121 				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
122 
123 static inline int is_s2io_card_up(const struct s2io_nic *sp)
124 {
125 	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
126 }
127 
128 /* Ethtool related variables and Macros. */
129 static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
130 	"Register test\t(offline)",
131 	"Eeprom test\t(offline)",
132 	"Link test\t(online)",
133 	"RLDRAM test\t(offline)",
134 	"BIST Test\t(offline)"
135 };
136 
137 static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
138 	{"tmac_frms"},
139 	{"tmac_data_octets"},
140 	{"tmac_drop_frms"},
141 	{"tmac_mcst_frms"},
142 	{"tmac_bcst_frms"},
143 	{"tmac_pause_ctrl_frms"},
144 	{"tmac_ttl_octets"},
145 	{"tmac_ucst_frms"},
146 	{"tmac_nucst_frms"},
147 	{"tmac_any_err_frms"},
148 	{"tmac_ttl_less_fb_octets"},
149 	{"tmac_vld_ip_octets"},
150 	{"tmac_vld_ip"},
151 	{"tmac_drop_ip"},
152 	{"tmac_icmp"},
153 	{"tmac_rst_tcp"},
154 	{"tmac_tcp"},
155 	{"tmac_udp"},
156 	{"rmac_vld_frms"},
157 	{"rmac_data_octets"},
158 	{"rmac_fcs_err_frms"},
159 	{"rmac_drop_frms"},
160 	{"rmac_vld_mcst_frms"},
161 	{"rmac_vld_bcst_frms"},
162 	{"rmac_in_rng_len_err_frms"},
163 	{"rmac_out_rng_len_err_frms"},
164 	{"rmac_long_frms"},
165 	{"rmac_pause_ctrl_frms"},
166 	{"rmac_unsup_ctrl_frms"},
167 	{"rmac_ttl_octets"},
168 	{"rmac_accepted_ucst_frms"},
169 	{"rmac_accepted_nucst_frms"},
170 	{"rmac_discarded_frms"},
171 	{"rmac_drop_events"},
172 	{"rmac_ttl_less_fb_octets"},
173 	{"rmac_ttl_frms"},
174 	{"rmac_usized_frms"},
175 	{"rmac_osized_frms"},
176 	{"rmac_frag_frms"},
177 	{"rmac_jabber_frms"},
178 	{"rmac_ttl_64_frms"},
179 	{"rmac_ttl_65_127_frms"},
180 	{"rmac_ttl_128_255_frms"},
181 	{"rmac_ttl_256_511_frms"},
182 	{"rmac_ttl_512_1023_frms"},
183 	{"rmac_ttl_1024_1518_frms"},
184 	{"rmac_ip"},
185 	{"rmac_ip_octets"},
186 	{"rmac_hdr_err_ip"},
187 	{"rmac_drop_ip"},
188 	{"rmac_icmp"},
189 	{"rmac_tcp"},
190 	{"rmac_udp"},
191 	{"rmac_err_drp_udp"},
192 	{"rmac_xgmii_err_sym"},
193 	{"rmac_frms_q0"},
194 	{"rmac_frms_q1"},
195 	{"rmac_frms_q2"},
196 	{"rmac_frms_q3"},
197 	{"rmac_frms_q4"},
198 	{"rmac_frms_q5"},
199 	{"rmac_frms_q6"},
200 	{"rmac_frms_q7"},
201 	{"rmac_full_q0"},
202 	{"rmac_full_q1"},
203 	{"rmac_full_q2"},
204 	{"rmac_full_q3"},
205 	{"rmac_full_q4"},
206 	{"rmac_full_q5"},
207 	{"rmac_full_q6"},
208 	{"rmac_full_q7"},
209 	{"rmac_pause_cnt"},
210 	{"rmac_xgmii_data_err_cnt"},
211 	{"rmac_xgmii_ctrl_err_cnt"},
212 	{"rmac_accepted_ip"},
213 	{"rmac_err_tcp"},
214 	{"rd_req_cnt"},
215 	{"new_rd_req_cnt"},
216 	{"new_rd_req_rtry_cnt"},
217 	{"rd_rtry_cnt"},
218 	{"wr_rtry_rd_ack_cnt"},
219 	{"wr_req_cnt"},
220 	{"new_wr_req_cnt"},
221 	{"new_wr_req_rtry_cnt"},
222 	{"wr_rtry_cnt"},
223 	{"wr_disc_cnt"},
224 	{"rd_rtry_wr_ack_cnt"},
225 	{"txp_wr_cnt"},
226 	{"txd_rd_cnt"},
227 	{"txd_wr_cnt"},
228 	{"rxd_rd_cnt"},
229 	{"rxd_wr_cnt"},
230 	{"txf_rd_cnt"},
231 	{"rxf_wr_cnt"}
232 };
233 
234 static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
235 	{"rmac_ttl_1519_4095_frms"},
236 	{"rmac_ttl_4096_8191_frms"},
237 	{"rmac_ttl_8192_max_frms"},
238 	{"rmac_ttl_gt_max_frms"},
239 	{"rmac_osized_alt_frms"},
240 	{"rmac_jabber_alt_frms"},
241 	{"rmac_gt_max_alt_frms"},
242 	{"rmac_vlan_frms"},
243 	{"rmac_len_discard"},
244 	{"rmac_fcs_discard"},
245 	{"rmac_pf_discard"},
246 	{"rmac_da_discard"},
247 	{"rmac_red_discard"},
248 	{"rmac_rts_discard"},
249 	{"rmac_ingm_full_discard"},
250 	{"link_fault_cnt"}
251 };
252 
253 static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
254 	{"\n DRIVER STATISTICS"},
255 	{"single_bit_ecc_errs"},
256 	{"double_bit_ecc_errs"},
257 	{"parity_err_cnt"},
258 	{"serious_err_cnt"},
259 	{"soft_reset_cnt"},
260 	{"fifo_full_cnt"},
261 	{"ring_0_full_cnt"},
262 	{"ring_1_full_cnt"},
263 	{"ring_2_full_cnt"},
264 	{"ring_3_full_cnt"},
265 	{"ring_4_full_cnt"},
266 	{"ring_5_full_cnt"},
267 	{"ring_6_full_cnt"},
268 	{"ring_7_full_cnt"},
269 	{"alarm_transceiver_temp_high"},
270 	{"alarm_transceiver_temp_low"},
271 	{"alarm_laser_bias_current_high"},
272 	{"alarm_laser_bias_current_low"},
273 	{"alarm_laser_output_power_high"},
274 	{"alarm_laser_output_power_low"},
275 	{"warn_transceiver_temp_high"},
276 	{"warn_transceiver_temp_low"},
277 	{"warn_laser_bias_current_high"},
278 	{"warn_laser_bias_current_low"},
279 	{"warn_laser_output_power_high"},
280 	{"warn_laser_output_power_low"},
281 	{"lro_aggregated_pkts"},
282 	{"lro_flush_both_count"},
283 	{"lro_out_of_sequence_pkts"},
284 	{"lro_flush_due_to_max_pkts"},
285 	{"lro_avg_aggr_pkts"},
286 	{"mem_alloc_fail_cnt"},
287 	{"pci_map_fail_cnt"},
288 	{"watchdog_timer_cnt"},
289 	{"mem_allocated"},
290 	{"mem_freed"},
291 	{"link_up_cnt"},
292 	{"link_down_cnt"},
293 	{"link_up_time"},
294 	{"link_down_time"},
295 	{"tx_tcode_buf_abort_cnt"},
296 	{"tx_tcode_desc_abort_cnt"},
297 	{"tx_tcode_parity_err_cnt"},
298 	{"tx_tcode_link_loss_cnt"},
299 	{"tx_tcode_list_proc_err_cnt"},
300 	{"rx_tcode_parity_err_cnt"},
301 	{"rx_tcode_abort_cnt"},
302 	{"rx_tcode_parity_abort_cnt"},
303 	{"rx_tcode_rda_fail_cnt"},
304 	{"rx_tcode_unkn_prot_cnt"},
305 	{"rx_tcode_fcs_err_cnt"},
306 	{"rx_tcode_buf_size_err_cnt"},
307 	{"rx_tcode_rxd_corrupt_cnt"},
308 	{"rx_tcode_unkn_err_cnt"},
309 	{"tda_err_cnt"},
310 	{"pfc_err_cnt"},
311 	{"pcc_err_cnt"},
312 	{"tti_err_cnt"},
313 	{"tpa_err_cnt"},
314 	{"sm_err_cnt"},
315 	{"lso_err_cnt"},
316 	{"mac_tmac_err_cnt"},
317 	{"mac_rmac_err_cnt"},
318 	{"xgxs_txgxs_err_cnt"},
319 	{"xgxs_rxgxs_err_cnt"},
320 	{"rc_err_cnt"},
321 	{"prc_pcix_err_cnt"},
322 	{"rpa_err_cnt"},
323 	{"rda_err_cnt"},
324 	{"rti_err_cnt"},
325 	{"mc_err_cnt"}
326 };
327 
328 #define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
329 #define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
330 #define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)
331 
332 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
333 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
334 
335 #define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
336 #define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
337 
338 #define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
339 #define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
340 
341 /* copy mac addr to def_mac_addr array */
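/*
 * The 48-bit address lives in the low six bytes of mac_addr, most
 * significant byte first: e.g. mac_addr == 0x0000AABBCCDDEEFFULL is
 * stored as AA:BB:CC:DD:EE:FF.
 */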
342 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
343 {
344 	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
345 	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
346 	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
347 	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
348 	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
349 	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
350 }
351 
352 /*
353  * Constants to be programmed into the Xena's registers, to configure
354  * the XAUI.
355  */
356 
357 #define	END_SIGN	0x0
358 static const u64 herc_act_dtx_cfg[] = {
359 	/* Set address */
360 	0x8000051536750000ULL, 0x80000515367500E0ULL,
361 	/* Write data */
362 	0x8000051536750004ULL, 0x80000515367500E4ULL,
363 	/* Set address */
364 	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
365 	/* Write data */
366 	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
367 	/* Set address */
368 	0x801205150D440000ULL, 0x801205150D4400E0ULL,
369 	/* Write data */
370 	0x801205150D440004ULL, 0x801205150D4400E4ULL,
371 	/* Set address */
372 	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
373 	/* Write data */
374 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
375 	/* Done */
376 	END_SIGN
377 };
378 
379 static const u64 xena_dtx_cfg[] = {
380 	/* Set address */
381 	0x8000051500000000ULL, 0x80000515000000E0ULL,
382 	/* Write data */
383 	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
384 	/* Set address */
385 	0x8001051500000000ULL, 0x80010515000000E0ULL,
386 	/* Write data */
387 	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
388 	/* Set address */
389 	0x8002051500000000ULL, 0x80020515000000E0ULL,
390 	/* Write data */
391 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
392 	END_SIGN
393 };
394 
/*
 * Constants for fixing the MAC address problem seen mostly on
 * Alpha machines.
 */
399 static const u64 fix_mac[] = {
400 	0x0060000000000000ULL, 0x0060600000000000ULL,
401 	0x0040600000000000ULL, 0x0000600000000000ULL,
402 	0x0020600000000000ULL, 0x0060600000000000ULL,
403 	0x0020600000000000ULL, 0x0060600000000000ULL,
404 	0x0020600000000000ULL, 0x0060600000000000ULL,
405 	0x0020600000000000ULL, 0x0060600000000000ULL,
406 	0x0020600000000000ULL, 0x0060600000000000ULL,
407 	0x0020600000000000ULL, 0x0060600000000000ULL,
408 	0x0020600000000000ULL, 0x0060600000000000ULL,
409 	0x0020600000000000ULL, 0x0060600000000000ULL,
410 	0x0020600000000000ULL, 0x0060600000000000ULL,
411 	0x0020600000000000ULL, 0x0060600000000000ULL,
412 	0x0020600000000000ULL, 0x0000600000000000ULL,
413 	0x0040600000000000ULL, 0x0060600000000000ULL,
414 	END_SIGN
415 };
416 
417 MODULE_LICENSE("GPL");
418 MODULE_VERSION(DRV_VERSION);
419 
420 
421 /* Module Loadable parameters. */
422 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
423 S2IO_PARM_INT(rx_ring_num, 1);
424 S2IO_PARM_INT(multiq, 0);
425 S2IO_PARM_INT(rx_ring_mode, 1);
426 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
427 S2IO_PARM_INT(rmac_pause_time, 0x100);
428 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
429 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
430 S2IO_PARM_INT(shared_splits, 0);
431 S2IO_PARM_INT(tmac_util_period, 5);
432 S2IO_PARM_INT(rmac_util_period, 5);
433 S2IO_PARM_INT(l3l4hdr_size, 128);
434 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
435 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
436 /* Frequency of Rx desc syncs expressed as power of 2 */
437 S2IO_PARM_INT(rxsync_frequency, 3);
438 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
439 S2IO_PARM_INT(intr_type, 2);
440 /* Large receive offload feature */
441 
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit the max IP pkt size (64K).
 */
445 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
446 S2IO_PARM_INT(indicate_max_pkts, 0);
447 
448 S2IO_PARM_INT(napi, 1);
449 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
450 
451 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
452 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
453 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
454 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
455 static unsigned int rts_frm_len[MAX_RX_RINGS] =
456 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
457 
458 module_param_array(tx_fifo_len, uint, NULL, 0);
459 module_param_array(rx_ring_sz, uint, NULL, 0);
460 module_param_array(rts_frm_len, uint, NULL, 0);
461 
462 /*
463  * S2IO device table.
464  * This table lists all the devices that this driver supports.
465  */
466 static const struct pci_device_id s2io_tbl[] = {
467 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
468 	 PCI_ANY_ID, PCI_ANY_ID},
469 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
470 	 PCI_ANY_ID, PCI_ANY_ID},
471 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
472 	 PCI_ANY_ID, PCI_ANY_ID},
473 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
474 	 PCI_ANY_ID, PCI_ANY_ID},
475 	{0,}
476 };
477 
478 MODULE_DEVICE_TABLE(pci, s2io_tbl);
479 
480 static const struct pci_error_handlers s2io_err_handler = {
481 	.error_detected = s2io_io_error_detected,
482 	.slot_reset = s2io_io_slot_reset,
483 	.resume = s2io_io_resume,
484 };
485 
486 static struct pci_driver s2io_driver = {
487 	.name = "S2IO",
488 	.id_table = s2io_tbl,
489 	.probe = s2io_init_nic,
490 	.remove = s2io_rem_nic,
491 	.err_handler = &s2io_err_handler,
492 };
493 
/* A simplifier macro used by both the init and free shared_mem functions. */
495 #define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
496 
497 /* netqueue manipulation helper functions */
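/*
 * Even when multiqueue support is disabled, each FIFO keeps a soft
 * queue_state (FIFO_QUEUE_STOP/FIFO_QUEUE_START) so that
 * s2io_wake_tx_queue() can tell whether the single netdev queue
 * should be woken when descriptors of a given FIFO complete.
 */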
498 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
499 {
500 	if (!sp->config.multiq) {
501 		int i;
502 
503 		for (i = 0; i < sp->config.tx_fifo_num; i++)
504 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
505 	}
506 	netif_tx_stop_all_queues(sp->dev);
507 }
508 
509 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
510 {
511 	if (!sp->config.multiq)
512 		sp->mac_control.fifos[fifo_no].queue_state =
513 			FIFO_QUEUE_STOP;
514 
515 	netif_tx_stop_all_queues(sp->dev);
516 }
517 
518 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
519 {
520 	if (!sp->config.multiq) {
521 		int i;
522 
523 		for (i = 0; i < sp->config.tx_fifo_num; i++)
524 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
525 	}
526 	netif_tx_start_all_queues(sp->dev);
527 }
528 
529 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
530 {
531 	if (!sp->config.multiq) {
532 		int i;
533 
534 		for (i = 0; i < sp->config.tx_fifo_num; i++)
535 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
536 	}
537 	netif_tx_wake_all_queues(sp->dev);
538 }
539 
540 static inline void s2io_wake_tx_queue(
541 	struct fifo_info *fifo, int cnt, u8 multiq)
542 {
543 
544 	if (multiq) {
545 		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
546 			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
547 	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
548 		if (netif_queue_stopped(fifo->dev)) {
549 			fifo->queue_state = FIFO_QUEUE_START;
550 			netif_wake_queue(fifo->dev);
551 		}
552 	}
553 }
554 
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 * Return Value: SUCCESS on success, a negative errno or FAILURE otherwise.
 */
562 
563 static int init_shared_mem(struct s2io_nic *nic)
564 {
565 	u32 size;
566 	void *tmp_v_addr, *tmp_v_addr_next;
567 	dma_addr_t tmp_p_addr, tmp_p_addr_next;
568 	struct RxD_block *pre_rxd_blk = NULL;
569 	int i, j, blk_cnt;
570 	int lst_size, lst_per_page;
571 	struct net_device *dev = nic->dev;
572 	unsigned long tmp;
573 	struct buffAdd *ba;
574 	struct config_param *config = &nic->config;
575 	struct mac_info *mac_control = &nic->mac_control;
576 	unsigned long long mem_allocated = 0;
577 
578 	/* Allocation and initialization of TXDLs in FIFOs */
579 	size = 0;
580 	for (i = 0; i < config->tx_fifo_num; i++) {
581 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
582 
583 		size += tx_cfg->fifo_len;
584 	}
585 	if (size > MAX_AVAILABLE_TXDS) {
586 		DBG_PRINT(ERR_DBG,
587 			  "Too many TxDs requested: %d, max supported: %d\n",
588 			  size, MAX_AVAILABLE_TXDS);
589 		return -EINVAL;
590 	}
591 
592 	size = 0;
593 	for (i = 0; i < config->tx_fifo_num; i++) {
594 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
595 
596 		size = tx_cfg->fifo_len;
		/* Legal values are from 2 to 8192 */
600 		if (size < 2) {
601 			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
602 				  "Valid lengths are 2 through 8192\n",
603 				  i, size);
604 			return -EINVAL;
605 		}
606 	}
607 
608 	lst_size = (sizeof(struct TxD) * config->max_txds);
609 	lst_per_page = PAGE_SIZE / lst_size;
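	/*
	 * Each TxD list (TxDL) holds max_txds descriptors;
	 * lst_per_page such lists fit in one PAGE_SIZE DMA allocation.
	 */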
610 
611 	for (i = 0; i < config->tx_fifo_num; i++) {
612 		struct fifo_info *fifo = &mac_control->fifos[i];
613 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
614 		int fifo_len = tx_cfg->fifo_len;
615 		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
616 
617 		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
618 		if (!fifo->list_info) {
619 			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
620 			return -ENOMEM;
621 		}
622 		mem_allocated += list_holder_size;
623 	}
624 	for (i = 0; i < config->tx_fifo_num; i++) {
625 		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
626 						lst_per_page);
627 		struct fifo_info *fifo = &mac_control->fifos[i];
628 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
629 
630 		fifo->tx_curr_put_info.offset = 0;
631 		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
632 		fifo->tx_curr_get_info.offset = 0;
633 		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
634 		fifo->fifo_no = i;
635 		fifo->nic = nic;
636 		fifo->max_txds = MAX_SKB_FRAGS + 2;
637 		fifo->dev = dev;
638 
639 		for (j = 0; j < page_num; j++) {
640 			int k = 0;
641 			dma_addr_t tmp_p;
642 			void *tmp_v;
643 			tmp_v = pci_alloc_consistent(nic->pdev,
644 						     PAGE_SIZE, &tmp_p);
645 			if (!tmp_v) {
646 				DBG_PRINT(INFO_DBG,
647 					  "pci_alloc_consistent failed for TxDL\n");
648 				return -ENOMEM;
649 			}
			/* If we got a zero DMA address (this can happen on
			 * certain platforms like PPC), reallocate.
			 * Store the virtual address of the page we don't
			 * want, to be freed later.
			 */
655 			if (!tmp_p) {
656 				mac_control->zerodma_virt_addr = tmp_v;
657 				DBG_PRINT(INIT_DBG,
658 					  "%s: Zero DMA address for TxDL. "
659 					  "Virtual address %p\n",
660 					  dev->name, tmp_v);
661 				tmp_v = pci_alloc_consistent(nic->pdev,
662 							     PAGE_SIZE, &tmp_p);
663 				if (!tmp_v) {
664 					DBG_PRINT(INFO_DBG,
665 						  "pci_alloc_consistent failed for TxDL\n");
666 					return -ENOMEM;
667 				}
668 				mem_allocated += PAGE_SIZE;
669 			}
670 			while (k < lst_per_page) {
671 				int l = (j * lst_per_page) + k;
672 				if (l == tx_cfg->fifo_len)
673 					break;
674 				fifo->list_info[l].list_virt_addr =
675 					tmp_v + (k * lst_size);
676 				fifo->list_info[l].list_phy_addr =
677 					tmp_p + (k * lst_size);
678 				k++;
679 			}
680 		}
681 	}
682 
683 	for (i = 0; i < config->tx_fifo_num; i++) {
684 		struct fifo_info *fifo = &mac_control->fifos[i];
685 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
686 
687 		size = tx_cfg->fifo_len;
688 		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
689 		if (!fifo->ufo_in_band_v)
690 			return -ENOMEM;
691 		mem_allocated += (size * sizeof(u64));
692 	}
693 
694 	/* Allocation and initialization of RXDs in Rings */
695 	size = 0;
696 	for (i = 0; i < config->rx_ring_num; i++) {
697 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
698 		struct ring_info *ring = &mac_control->rings[i];
699 
700 		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
701 			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
702 				  "multiple of RxDs per Block\n",
703 				  dev->name, i);
704 			return FAILURE;
705 		}
706 		size += rx_cfg->num_rxd;
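		/*
		 * The last RxD of every block links to the next block,
		 * so the usable packet count is num_rxd less one per
		 * block.
		 */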
707 		ring->block_count = rx_cfg->num_rxd /
708 			(rxd_count[nic->rxd_mode] + 1);
709 		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
710 	}
711 	if (nic->rxd_mode == RXD_MODE_1)
712 		size = (size * (sizeof(struct RxD1)));
713 	else
714 		size = (size * (sizeof(struct RxD3)));
715 
716 	for (i = 0; i < config->rx_ring_num; i++) {
717 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
718 		struct ring_info *ring = &mac_control->rings[i];
719 
720 		ring->rx_curr_get_info.block_index = 0;
721 		ring->rx_curr_get_info.offset = 0;
722 		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
723 		ring->rx_curr_put_info.block_index = 0;
724 		ring->rx_curr_put_info.offset = 0;
725 		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
726 		ring->nic = nic;
727 		ring->ring_no = i;
728 
729 		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
730 		/*  Allocating all the Rx blocks */
731 		for (j = 0; j < blk_cnt; j++) {
732 			struct rx_block_info *rx_blocks;
733 			int l;
734 
735 			rx_blocks = &ring->rx_blocks[j];
736 			size = SIZE_OF_BLOCK;	/* size is always page size */
737 			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
738 							  &tmp_p_addr);
739 			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was allocated up to the
				 * point of failure.
				 */
746 				rx_blocks->block_virt_addr = tmp_v_addr;
747 				return -ENOMEM;
748 			}
749 			mem_allocated += size;
750 			memset(tmp_v_addr, 0, size);
751 
752 			size = sizeof(struct rxd_info) *
753 				rxd_count[nic->rxd_mode];
754 			rx_blocks->block_virt_addr = tmp_v_addr;
755 			rx_blocks->block_dma_addr = tmp_p_addr;
756 			rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
757 			if (!rx_blocks->rxds)
758 				return -ENOMEM;
759 			mem_allocated += size;
760 			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
761 				rx_blocks->rxds[l].virt_addr =
762 					rx_blocks->block_virt_addr +
763 					(rxd_size[nic->rxd_mode] * l);
764 				rx_blocks->rxds[l].dma_addr =
765 					rx_blocks->block_dma_addr +
766 					(rxd_size[nic->rxd_mode] * l);
767 			}
768 		}
769 		/* Interlinking all Rx Blocks */
770 		for (j = 0; j < blk_cnt; j++) {
771 			int next = (j + 1) % blk_cnt;
772 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
773 			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
774 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
775 			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
776 
777 			pre_rxd_blk = tmp_v_addr;
778 			pre_rxd_blk->reserved_2_pNext_RxD_block =
779 				(unsigned long)tmp_v_addr_next;
780 			pre_rxd_blk->pNext_RxD_Blk_physical =
781 				(u64)tmp_p_addr_next;
782 		}
783 	}
784 	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocating storage for the buffer addresses in 2BUFF
		 * mode, and the buffers themselves as well.
		 */
789 		for (i = 0; i < config->rx_ring_num; i++) {
790 			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
791 			struct ring_info *ring = &mac_control->rings[i];
792 
793 			blk_cnt = rx_cfg->num_rxd /
794 				(rxd_count[nic->rxd_mode] + 1);
795 			size = sizeof(struct buffAdd *) * blk_cnt;
796 			ring->ba = kmalloc(size, GFP_KERNEL);
797 			if (!ring->ba)
798 				return -ENOMEM;
799 			mem_allocated += size;
800 			for (j = 0; j < blk_cnt; j++) {
801 				int k = 0;
802 
803 				size = sizeof(struct buffAdd) *
804 					(rxd_count[nic->rxd_mode] + 1);
805 				ring->ba[j] = kmalloc(size, GFP_KERNEL);
806 				if (!ring->ba[j])
807 					return -ENOMEM;
808 				mem_allocated += size;
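				/*
				 * ba_0/ba_1 below are aligned views of the
				 * raw kmalloc'd buffers: adding ALIGN_SIZE
				 * and masking it off rounds each address up
				 * to the next boundary (this assumes
				 * ALIGN_SIZE is of the form 2^n - 1).
				 */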
809 				while (k != rxd_count[nic->rxd_mode]) {
810 					ba = &ring->ba[j][k];
811 					size = BUF0_LEN + ALIGN_SIZE;
812 					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
813 					if (!ba->ba_0_org)
814 						return -ENOMEM;
815 					mem_allocated += size;
816 					tmp = (unsigned long)ba->ba_0_org;
817 					tmp += ALIGN_SIZE;
818 					tmp &= ~((unsigned long)ALIGN_SIZE);
819 					ba->ba_0 = (void *)tmp;
820 
821 					size = BUF1_LEN + ALIGN_SIZE;
822 					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
823 					if (!ba->ba_1_org)
824 						return -ENOMEM;
825 					mem_allocated += size;
826 					tmp = (unsigned long)ba->ba_1_org;
827 					tmp += ALIGN_SIZE;
828 					tmp &= ~((unsigned long)ALIGN_SIZE);
829 					ba->ba_1 = (void *)tmp;
830 					k++;
831 				}
832 			}
833 		}
834 	}
835 
836 	/* Allocation and initialization of Statistics block */
837 	size = sizeof(struct stat_block);
838 	mac_control->stats_mem =
839 		pci_alloc_consistent(nic->pdev, size,
840 				     &mac_control->stats_mem_phy);
841 
842 	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was allocated up to the
		 * point of failure.
		 */
848 		return -ENOMEM;
849 	}
850 	mem_allocated += size;
851 	mac_control->stats_mem_sz = size;
852 
853 	tmp_v_addr = mac_control->stats_mem;
854 	mac_control->stats_info = tmp_v_addr;
855 	memset(tmp_v_addr, 0, size);
856 	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
857 		dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
858 	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
859 	return SUCCESS;
860 }
861 
/**
 * free_shared_mem - Free the allocated Memory
 * @nic:  Device private variable.
 * Description: This function frees all the memory allocated by
 * init_shared_mem() and returns it to the kernel.
 */
868 
869 static void free_shared_mem(struct s2io_nic *nic)
870 {
871 	int i, j, blk_cnt, size;
872 	void *tmp_v_addr;
873 	dma_addr_t tmp_p_addr;
874 	int lst_size, lst_per_page;
875 	struct net_device *dev;
876 	int page_num = 0;
877 	struct config_param *config;
878 	struct mac_info *mac_control;
879 	struct stat_block *stats;
880 	struct swStat *swstats;
881 
882 	if (!nic)
883 		return;
884 
885 	dev = nic->dev;
886 
887 	config = &nic->config;
888 	mac_control = &nic->mac_control;
889 	stats = mac_control->stats_info;
890 	swstats = &stats->sw_stat;
891 
892 	lst_size = sizeof(struct TxD) * config->max_txds;
893 	lst_per_page = PAGE_SIZE / lst_size;
894 
895 	for (i = 0; i < config->tx_fifo_num; i++) {
896 		struct fifo_info *fifo = &mac_control->fifos[i];
897 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
898 
899 		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
900 		for (j = 0; j < page_num; j++) {
901 			int mem_blks = (j * lst_per_page);
902 			struct list_info_hold *fli;
903 
904 			if (!fifo->list_info)
905 				return;
906 
907 			fli = &fifo->list_info[mem_blks];
908 			if (!fli->list_virt_addr)
909 				break;
910 			pci_free_consistent(nic->pdev, PAGE_SIZE,
911 					    fli->list_virt_addr,
912 					    fli->list_phy_addr);
913 			swstats->mem_freed += PAGE_SIZE;
914 		}
915 		/* If we got a zero DMA address during allocation,
916 		 * free the page now
917 		 */
918 		if (mac_control->zerodma_virt_addr) {
919 			pci_free_consistent(nic->pdev, PAGE_SIZE,
920 					    mac_control->zerodma_virt_addr,
921 					    (dma_addr_t)0);
922 			DBG_PRINT(INIT_DBG,
923 				  "%s: Freeing TxDL with zero DMA address. "
924 				  "Virtual address %p\n",
925 				  dev->name, mac_control->zerodma_virt_addr);
926 			swstats->mem_freed += PAGE_SIZE;
927 		}
928 		kfree(fifo->list_info);
929 		swstats->mem_freed += tx_cfg->fifo_len *
930 			sizeof(struct list_info_hold);
931 	}
932 
933 	size = SIZE_OF_BLOCK;
934 	for (i = 0; i < config->rx_ring_num; i++) {
935 		struct ring_info *ring = &mac_control->rings[i];
936 
937 		blk_cnt = ring->block_count;
938 		for (j = 0; j < blk_cnt; j++) {
939 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
940 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
941 			if (tmp_v_addr == NULL)
942 				break;
943 			pci_free_consistent(nic->pdev, size,
944 					    tmp_v_addr, tmp_p_addr);
945 			swstats->mem_freed += size;
946 			kfree(ring->rx_blocks[j].rxds);
947 			swstats->mem_freed += sizeof(struct rxd_info) *
948 				rxd_count[nic->rxd_mode];
949 		}
950 	}
951 
952 	if (nic->rxd_mode == RXD_MODE_3B) {
953 		/* Freeing buffer storage addresses in 2BUFF mode. */
954 		for (i = 0; i < config->rx_ring_num; i++) {
955 			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
956 			struct ring_info *ring = &mac_control->rings[i];
957 
958 			blk_cnt = rx_cfg->num_rxd /
959 				(rxd_count[nic->rxd_mode] + 1);
960 			for (j = 0; j < blk_cnt; j++) {
961 				int k = 0;
962 				if (!ring->ba[j])
963 					continue;
964 				while (k != rxd_count[nic->rxd_mode]) {
965 					struct buffAdd *ba = &ring->ba[j][k];
966 					kfree(ba->ba_0_org);
967 					swstats->mem_freed +=
968 						BUF0_LEN + ALIGN_SIZE;
969 					kfree(ba->ba_1_org);
970 					swstats->mem_freed +=
971 						BUF1_LEN + ALIGN_SIZE;
972 					k++;
973 				}
974 				kfree(ring->ba[j]);
975 				swstats->mem_freed += sizeof(struct buffAdd) *
976 					(rxd_count[nic->rxd_mode] + 1);
977 			}
978 			kfree(ring->ba);
979 			swstats->mem_freed += sizeof(struct buffAdd *) *
980 				blk_cnt;
981 		}
982 	}
983 
984 	for (i = 0; i < nic->config.tx_fifo_num; i++) {
985 		struct fifo_info *fifo = &mac_control->fifos[i];
986 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
987 
988 		if (fifo->ufo_in_band_v) {
989 			swstats->mem_freed += tx_cfg->fifo_len *
990 				sizeof(u64);
991 			kfree(fifo->ufo_in_band_v);
992 		}
993 	}
994 
995 	if (mac_control->stats_mem) {
996 		swstats->mem_freed += mac_control->stats_mem_sz;
997 		pci_free_consistent(nic->pdev,
998 				    mac_control->stats_mem_sz,
999 				    mac_control->stats_mem,
1000 				    mac_control->stats_mem_phy);
1001 	}
1002 }
1003 
/**
 * s2io_verify_pci_mode - Verify the PCI/PCI-X mode of the adapter.
 * @nic: Device private variable.
 * Return Value: the PCI mode on success, '-1' if the mode is unknown.
 */
1007 
1008 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1009 {
1010 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1011 	register u64 val64 = 0;
1012 	int     mode;
1013 
1014 	val64 = readq(&bar0->pci_mode);
1015 	mode = (u8)GET_PCI_MODE(val64);
1016 
1017 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1018 		return -1;      /* Unknown PCI mode */
1019 	return mode;
1020 }
1021 
1022 #define NEC_VENID   0x1033
1023 #define NEC_DEVID   0x0125
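/*
 * Returns 1 if an NEC bridge (vendor 0x1033, device 0x0125) is present
 * on the parent of the adapter's bus, i.e. the card sits behind that
 * bridge.
 */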
1024 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1025 {
1026 	struct pci_dev *tdev = NULL;
1027 	for_each_pci_dev(tdev) {
1028 		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1029 			if (tdev->bus == s2io_pdev->bus->parent) {
1030 				pci_dev_put(tdev);
1031 				return 1;
1032 			}
1033 		}
1034 	}
1035 	return 0;
1036 }
1037 
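/* Bus speed in MHz, indexed by the mode value from GET_PCI_MODE() */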
1038 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
 * s2io_print_pci_mode - Print the bus width and mode the adapter is on.
 * @nic: Device private variable.
 * Return Value: the PCI mode on success, '-1' if the mode is unknown
 * or unsupported.
 */
1042 static int s2io_print_pci_mode(struct s2io_nic *nic)
1043 {
1044 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1045 	register u64 val64 = 0;
1046 	int	mode;
1047 	struct config_param *config = &nic->config;
1048 	const char *pcimode;
1049 
1050 	val64 = readq(&bar0->pci_mode);
1051 	mode = (u8)GET_PCI_MODE(val64);
1052 
1053 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1054 		return -1;	/* Unknown PCI mode */
1055 
1056 	config->bus_speed = bus_speed[mode];
1057 
1058 	if (s2io_on_nec_bridge(nic->pdev)) {
1059 		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1060 			  nic->dev->name);
1061 		return mode;
1062 	}
1063 
1064 	switch (mode) {
1065 	case PCI_MODE_PCI_33:
1066 		pcimode = "33MHz PCI bus";
1067 		break;
1068 	case PCI_MODE_PCI_66:
1069 		pcimode = "66MHz PCI bus";
1070 		break;
1071 	case PCI_MODE_PCIX_M1_66:
1072 		pcimode = "66MHz PCIX(M1) bus";
1073 		break;
1074 	case PCI_MODE_PCIX_M1_100:
1075 		pcimode = "100MHz PCIX(M1) bus";
1076 		break;
1077 	case PCI_MODE_PCIX_M1_133:
1078 		pcimode = "133MHz PCIX(M1) bus";
1079 		break;
1080 	case PCI_MODE_PCIX_M2_66:
1081 		pcimode = "133MHz PCIX(M2) bus";
1082 		break;
1083 	case PCI_MODE_PCIX_M2_100:
1084 		pcimode = "200MHz PCIX(M2) bus";
1085 		break;
1086 	case PCI_MODE_PCIX_M2_133:
1087 		pcimode = "266MHz PCIX(M2) bus";
1088 		break;
1089 	default:
1090 		pcimode = "unsupported bus!";
1091 		mode = -1;
1092 	}
1093 
1094 	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1095 		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1096 
1097 	return mode;
1098 }
1099 
/**
 *  init_tti - Initialize the transmit traffic interrupt scheme
 *  @nic: device private variable
 *  @link: link status (UP/DOWN) used to enable/disable continuous
 *  transmit interrupts
 *  Description: The function configures the transmit traffic interrupts.
 *  Return Value:  SUCCESS on success and
 *  '-1' on failure
 */
1109 
1110 static int init_tti(struct s2io_nic *nic, int link)
1111 {
1112 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1113 	register u64 val64 = 0;
1114 	int i;
1115 	struct config_param *config = &nic->config;
1116 
1117 	for (i = 0; i < config->tx_fifo_num; i++) {
1118 		/*
1119 		 * TTI Initialization. Default Tx timer gets us about
1120 		 * 250 interrupts per sec. Continuous interrupts are enabled
1121 		 * by default.
1122 		 */
1123 		if (nic->device_type == XFRAME_II_DEVICE) {
1124 			int count = (nic->config.bus_speed * 125)/2;
1125 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1126 		} else
1127 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1128 
1129 		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1130 			TTI_DATA1_MEM_TX_URNG_B(0x10) |
1131 			TTI_DATA1_MEM_TX_URNG_C(0x30) |
1132 			TTI_DATA1_MEM_TX_TIMER_AC_EN;
1133 		if (i == 0)
1134 			if (use_continuous_tx_intrs && (link == LINK_UP))
1135 				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1136 		writeq(val64, &bar0->tti_data1_mem);
1137 
1138 		if (nic->config.intr_type == MSI_X) {
1139 			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1140 				TTI_DATA2_MEM_TX_UFC_B(0x100) |
1141 				TTI_DATA2_MEM_TX_UFC_C(0x200) |
1142 				TTI_DATA2_MEM_TX_UFC_D(0x300);
1143 		} else {
1144 			if ((nic->config.tx_steering_type ==
1145 			     TX_DEFAULT_STEERING) &&
1146 			    (config->tx_fifo_num > 1) &&
1147 			    (i >= nic->udp_fifo_idx) &&
1148 			    (i < (nic->udp_fifo_idx +
1149 				  nic->total_udp_fifos)))
1150 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1151 					TTI_DATA2_MEM_TX_UFC_B(0x80) |
1152 					TTI_DATA2_MEM_TX_UFC_C(0x100) |
1153 					TTI_DATA2_MEM_TX_UFC_D(0x120);
1154 			else
1155 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1156 					TTI_DATA2_MEM_TX_UFC_B(0x20) |
1157 					TTI_DATA2_MEM_TX_UFC_C(0x40) |
1158 					TTI_DATA2_MEM_TX_UFC_D(0x80);
1159 		}
1160 
1161 		writeq(val64, &bar0->tti_data2_mem);
1162 
1163 		val64 = TTI_CMD_MEM_WE |
1164 			TTI_CMD_MEM_STROBE_NEW_CMD |
1165 			TTI_CMD_MEM_OFFSET(i);
1166 		writeq(val64, &bar0->tti_command_mem);
1167 
1168 		if (wait_for_cmd_complete(&bar0->tti_command_mem,
1169 					  TTI_CMD_MEM_STROBE_NEW_CMD,
1170 					  S2IO_BIT_RESET) != SUCCESS)
1171 			return FAILURE;
1172 	}
1173 
1174 	return SUCCESS;
1175 }
1176 
1177 /**
1178  *  init_nic - Initialization of hardware
1179  *  @nic: device private variable
1180  *  Description: The function sequentially configures every block
1181  *  of the H/W from their reset values.
1182  *  Return Value:  SUCCESS on success and
1183  *  '-1' on failure (endian settings incorrect).
1184  */
1185 
1186 static int init_nic(struct s2io_nic *nic)
1187 {
1188 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1189 	struct net_device *dev = nic->dev;
1190 	register u64 val64 = 0;
1191 	void __iomem *add;
1192 	u32 time;
1193 	int i, j;
1194 	int dtx_cnt = 0;
1195 	unsigned long long mem_share;
1196 	int mem_size;
1197 	struct config_param *config = &nic->config;
1198 	struct mac_info *mac_control = &nic->mac_control;
1199 
	/* to set the swapper control on the card */
1201 	if (s2io_set_swapper(nic)) {
1202 		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1203 		return -EIO;
1204 	}
1205 
	/*
	 * Herc requires EOI to be removed from reset before XGXS,
	 * so do that first.
	 */
1209 	if (nic->device_type & XFRAME_II_DEVICE) {
1210 		val64 = 0xA500000000ULL;
1211 		writeq(val64, &bar0->sw_reset);
1212 		msleep(500);
1213 		val64 = readq(&bar0->sw_reset);
1214 	}
1215 
1216 	/* Remove XGXS from reset state */
1217 	val64 = 0;
1218 	writeq(val64, &bar0->sw_reset);
1219 	msleep(500);
1220 	val64 = readq(&bar0->sw_reset);
1221 
	/* Ensure that it's safe to access registers by checking that the
	 * RIC_RUNNING bit is reset. This check is valid only for Xframe II.
	 */
1225 	if (nic->device_type == XFRAME_II_DEVICE) {
1226 		for (i = 0; i < 50; i++) {
1227 			val64 = readq(&bar0->adapter_status);
1228 			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1229 				break;
1230 			msleep(10);
1231 		}
1232 		if (i == 50)
1233 			return -ENODEV;
1234 	}
1235 
1236 	/*  Enable Receiving broadcasts */
1237 	add = &bar0->mac_cfg;
1238 	val64 = readq(&bar0->mac_cfg);
1239 	val64 |= MAC_RMAC_BCAST_ENABLE;
1240 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1241 	writel((u32)val64, add);
1242 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1243 	writel((u32) (val64 >> 32), (add + 4));
1244 
1245 	/* Read registers in all blocks */
1246 	val64 = readq(&bar0->mac_int_mask);
1247 	val64 = readq(&bar0->mc_int_mask);
1248 	val64 = readq(&bar0->xgxs_int_mask);
1249 
1250 	/*  Set MTU */
1251 	val64 = dev->mtu;
1252 	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1253 
1254 	if (nic->device_type & XFRAME_II_DEVICE) {
1255 		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1256 			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1257 					  &bar0->dtx_control, UF);
1258 			if (dtx_cnt & 0x1)
1259 				msleep(1); /* Necessary!! */
1260 			dtx_cnt++;
1261 		}
1262 	} else {
1263 		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1264 			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1265 					  &bar0->dtx_control, UF);
1266 			val64 = readq(&bar0->dtx_control);
1267 			dtx_cnt++;
1268 		}
1269 	}
1270 
1271 	/*  Tx DMA Initialization */
1272 	val64 = 0;
1273 	writeq(val64, &bar0->tx_fifo_partition_0);
1274 	writeq(val64, &bar0->tx_fifo_partition_1);
1275 	writeq(val64, &bar0->tx_fifo_partition_2);
1276 	writeq(val64, &bar0->tx_fifo_partition_3);
1277 
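	/*
	 * Each tx_fifo_partition register holds the length and priority
	 * of two FIFOs; j selects the low or high half of the register,
	 * and i is bumped past an odd final FIFO so that the switch
	 * below writes out the last, partially filled, register.
	 */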
1278 	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1279 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1280 
1281 		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1282 			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1283 
1284 		if (i == (config->tx_fifo_num - 1)) {
1285 			if (i % 2 == 0)
1286 				i++;
1287 		}
1288 
1289 		switch (i) {
1290 		case 1:
1291 			writeq(val64, &bar0->tx_fifo_partition_0);
1292 			val64 = 0;
1293 			j = 0;
1294 			break;
1295 		case 3:
1296 			writeq(val64, &bar0->tx_fifo_partition_1);
1297 			val64 = 0;
1298 			j = 0;
1299 			break;
1300 		case 5:
1301 			writeq(val64, &bar0->tx_fifo_partition_2);
1302 			val64 = 0;
1303 			j = 0;
1304 			break;
1305 		case 7:
1306 			writeq(val64, &bar0->tx_fifo_partition_3);
1307 			val64 = 0;
1308 			j = 0;
1309 			break;
1310 		default:
1311 			j++;
1312 			break;
1313 		}
1314 	}
1315 
1316 	/*
1317 	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1318 	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1319 	 */
1320 	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1321 		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1322 
1323 	val64 = readq(&bar0->tx_fifo_partition_0);
1324 	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1325 		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1326 
1327 	/*
1328 	 * Initialization of Tx_PA_CONFIG register to ignore packet
1329 	 * integrity checking.
1330 	 */
1331 	val64 = readq(&bar0->tx_pa_cfg);
1332 	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1333 		TX_PA_CFG_IGNORE_SNAP_OUI |
1334 		TX_PA_CFG_IGNORE_LLC_CTRL |
1335 		TX_PA_CFG_IGNORE_L2_ERR;
1336 	writeq(val64, &bar0->tx_pa_cfg);
1337 
1338 	/* Rx DMA initialization. */
1339 	val64 = 0;
1340 	for (i = 0; i < config->rx_ring_num; i++) {
1341 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1342 
1343 		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1344 	}
1345 	writeq(val64, &bar0->rx_queue_priority);
1346 
	/*
	 * Allocating an equal share of memory to all the
	 * configured Rings; ring 0 also gets any remainder.
	 */
1351 	val64 = 0;
1352 	if (nic->device_type & XFRAME_II_DEVICE)
1353 		mem_size = 32;
1354 	else
1355 		mem_size = 64;
1356 
1357 	for (i = 0; i < config->rx_ring_num; i++) {
1358 		switch (i) {
1359 		case 0:
1360 			mem_share = (mem_size / config->rx_ring_num +
1361 				     mem_size % config->rx_ring_num);
1362 			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1363 			continue;
1364 		case 1:
1365 			mem_share = (mem_size / config->rx_ring_num);
1366 			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1367 			continue;
1368 		case 2:
1369 			mem_share = (mem_size / config->rx_ring_num);
1370 			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1371 			continue;
1372 		case 3:
1373 			mem_share = (mem_size / config->rx_ring_num);
1374 			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1375 			continue;
1376 		case 4:
1377 			mem_share = (mem_size / config->rx_ring_num);
1378 			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1379 			continue;
1380 		case 5:
1381 			mem_share = (mem_size / config->rx_ring_num);
1382 			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1383 			continue;
1384 		case 6:
1385 			mem_share = (mem_size / config->rx_ring_num);
1386 			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1387 			continue;
1388 		case 7:
1389 			mem_share = (mem_size / config->rx_ring_num);
1390 			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1391 			continue;
1392 		}
1393 	}
1394 	writeq(val64, &bar0->rx_queue_cfg);
1395 
1396 	/*
1397 	 * Filling Tx round robin registers
1398 	 * as per the number of FIFOs for equal scheduling priority
1399 	 */
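	/*
	 * Each byte written into the five round-robin registers names
	 * the FIFO (0-7) to be serviced in that slot of the calendar.
	 */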
1400 	switch (config->tx_fifo_num) {
1401 	case 1:
1402 		val64 = 0x0;
1403 		writeq(val64, &bar0->tx_w_round_robin_0);
1404 		writeq(val64, &bar0->tx_w_round_robin_1);
1405 		writeq(val64, &bar0->tx_w_round_robin_2);
1406 		writeq(val64, &bar0->tx_w_round_robin_3);
1407 		writeq(val64, &bar0->tx_w_round_robin_4);
1408 		break;
1409 	case 2:
1410 		val64 = 0x0001000100010001ULL;
1411 		writeq(val64, &bar0->tx_w_round_robin_0);
1412 		writeq(val64, &bar0->tx_w_round_robin_1);
1413 		writeq(val64, &bar0->tx_w_round_robin_2);
1414 		writeq(val64, &bar0->tx_w_round_robin_3);
1415 		val64 = 0x0001000100000000ULL;
1416 		writeq(val64, &bar0->tx_w_round_robin_4);
1417 		break;
1418 	case 3:
1419 		val64 = 0x0001020001020001ULL;
1420 		writeq(val64, &bar0->tx_w_round_robin_0);
1421 		val64 = 0x0200010200010200ULL;
1422 		writeq(val64, &bar0->tx_w_round_robin_1);
1423 		val64 = 0x0102000102000102ULL;
1424 		writeq(val64, &bar0->tx_w_round_robin_2);
1425 		val64 = 0x0001020001020001ULL;
1426 		writeq(val64, &bar0->tx_w_round_robin_3);
1427 		val64 = 0x0200010200000000ULL;
1428 		writeq(val64, &bar0->tx_w_round_robin_4);
1429 		break;
1430 	case 4:
1431 		val64 = 0x0001020300010203ULL;
1432 		writeq(val64, &bar0->tx_w_round_robin_0);
1433 		writeq(val64, &bar0->tx_w_round_robin_1);
1434 		writeq(val64, &bar0->tx_w_round_robin_2);
1435 		writeq(val64, &bar0->tx_w_round_robin_3);
1436 		val64 = 0x0001020300000000ULL;
1437 		writeq(val64, &bar0->tx_w_round_robin_4);
1438 		break;
1439 	case 5:
1440 		val64 = 0x0001020304000102ULL;
1441 		writeq(val64, &bar0->tx_w_round_robin_0);
1442 		val64 = 0x0304000102030400ULL;
1443 		writeq(val64, &bar0->tx_w_round_robin_1);
1444 		val64 = 0x0102030400010203ULL;
1445 		writeq(val64, &bar0->tx_w_round_robin_2);
1446 		val64 = 0x0400010203040001ULL;
1447 		writeq(val64, &bar0->tx_w_round_robin_3);
1448 		val64 = 0x0203040000000000ULL;
1449 		writeq(val64, &bar0->tx_w_round_robin_4);
1450 		break;
1451 	case 6:
1452 		val64 = 0x0001020304050001ULL;
1453 		writeq(val64, &bar0->tx_w_round_robin_0);
1454 		val64 = 0x0203040500010203ULL;
1455 		writeq(val64, &bar0->tx_w_round_robin_1);
1456 		val64 = 0x0405000102030405ULL;
1457 		writeq(val64, &bar0->tx_w_round_robin_2);
1458 		val64 = 0x0001020304050001ULL;
1459 		writeq(val64, &bar0->tx_w_round_robin_3);
1460 		val64 = 0x0203040500000000ULL;
1461 		writeq(val64, &bar0->tx_w_round_robin_4);
1462 		break;
1463 	case 7:
1464 		val64 = 0x0001020304050600ULL;
1465 		writeq(val64, &bar0->tx_w_round_robin_0);
1466 		val64 = 0x0102030405060001ULL;
1467 		writeq(val64, &bar0->tx_w_round_robin_1);
1468 		val64 = 0x0203040506000102ULL;
1469 		writeq(val64, &bar0->tx_w_round_robin_2);
1470 		val64 = 0x0304050600010203ULL;
1471 		writeq(val64, &bar0->tx_w_round_robin_3);
1472 		val64 = 0x0405060000000000ULL;
1473 		writeq(val64, &bar0->tx_w_round_robin_4);
1474 		break;
1475 	case 8:
1476 		val64 = 0x0001020304050607ULL;
1477 		writeq(val64, &bar0->tx_w_round_robin_0);
1478 		writeq(val64, &bar0->tx_w_round_robin_1);
1479 		writeq(val64, &bar0->tx_w_round_robin_2);
1480 		writeq(val64, &bar0->tx_w_round_robin_3);
1481 		val64 = 0x0001020300000000ULL;
1482 		writeq(val64, &bar0->tx_w_round_robin_4);
1483 		break;
1484 	}
1485 
1486 	/* Enable all configured Tx FIFO partitions */
1487 	val64 = readq(&bar0->tx_fifo_partition_0);
1488 	val64 |= (TX_FIFO_PARTITION_EN);
1489 	writeq(val64, &bar0->tx_fifo_partition_0);
1490 
1491 	/* Filling the Rx round robin registers as per the
1492 	 * number of Rings and steering based on QoS with
1493 	 * equal priority.
1494 	 */
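	/*
	 * In rts_qos_steering each byte corresponds to one of the eight
	 * QoS levels; judging by the constants below it is a ring
	 * bitmap, with ring 0 as the most significant bit.
	 */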
1495 	switch (config->rx_ring_num) {
1496 	case 1:
1497 		val64 = 0x0;
1498 		writeq(val64, &bar0->rx_w_round_robin_0);
1499 		writeq(val64, &bar0->rx_w_round_robin_1);
1500 		writeq(val64, &bar0->rx_w_round_robin_2);
1501 		writeq(val64, &bar0->rx_w_round_robin_3);
1502 		writeq(val64, &bar0->rx_w_round_robin_4);
1503 
1504 		val64 = 0x8080808080808080ULL;
1505 		writeq(val64, &bar0->rts_qos_steering);
1506 		break;
1507 	case 2:
1508 		val64 = 0x0001000100010001ULL;
1509 		writeq(val64, &bar0->rx_w_round_robin_0);
1510 		writeq(val64, &bar0->rx_w_round_robin_1);
1511 		writeq(val64, &bar0->rx_w_round_robin_2);
1512 		writeq(val64, &bar0->rx_w_round_robin_3);
1513 		val64 = 0x0001000100000000ULL;
1514 		writeq(val64, &bar0->rx_w_round_robin_4);
1515 
1516 		val64 = 0x8080808040404040ULL;
1517 		writeq(val64, &bar0->rts_qos_steering);
1518 		break;
1519 	case 3:
1520 		val64 = 0x0001020001020001ULL;
1521 		writeq(val64, &bar0->rx_w_round_robin_0);
1522 		val64 = 0x0200010200010200ULL;
1523 		writeq(val64, &bar0->rx_w_round_robin_1);
1524 		val64 = 0x0102000102000102ULL;
1525 		writeq(val64, &bar0->rx_w_round_robin_2);
1526 		val64 = 0x0001020001020001ULL;
1527 		writeq(val64, &bar0->rx_w_round_robin_3);
1528 		val64 = 0x0200010200000000ULL;
1529 		writeq(val64, &bar0->rx_w_round_robin_4);
1530 
1531 		val64 = 0x8080804040402020ULL;
1532 		writeq(val64, &bar0->rts_qos_steering);
1533 		break;
1534 	case 4:
1535 		val64 = 0x0001020300010203ULL;
1536 		writeq(val64, &bar0->rx_w_round_robin_0);
1537 		writeq(val64, &bar0->rx_w_round_robin_1);
1538 		writeq(val64, &bar0->rx_w_round_robin_2);
1539 		writeq(val64, &bar0->rx_w_round_robin_3);
1540 		val64 = 0x0001020300000000ULL;
1541 		writeq(val64, &bar0->rx_w_round_robin_4);
1542 
1543 		val64 = 0x8080404020201010ULL;
1544 		writeq(val64, &bar0->rts_qos_steering);
1545 		break;
1546 	case 5:
1547 		val64 = 0x0001020304000102ULL;
1548 		writeq(val64, &bar0->rx_w_round_robin_0);
1549 		val64 = 0x0304000102030400ULL;
1550 		writeq(val64, &bar0->rx_w_round_robin_1);
1551 		val64 = 0x0102030400010203ULL;
1552 		writeq(val64, &bar0->rx_w_round_robin_2);
1553 		val64 = 0x0400010203040001ULL;
1554 		writeq(val64, &bar0->rx_w_round_robin_3);
1555 		val64 = 0x0203040000000000ULL;
1556 		writeq(val64, &bar0->rx_w_round_robin_4);
1557 
1558 		val64 = 0x8080404020201008ULL;
1559 		writeq(val64, &bar0->rts_qos_steering);
1560 		break;
1561 	case 6:
1562 		val64 = 0x0001020304050001ULL;
1563 		writeq(val64, &bar0->rx_w_round_robin_0);
1564 		val64 = 0x0203040500010203ULL;
1565 		writeq(val64, &bar0->rx_w_round_robin_1);
1566 		val64 = 0x0405000102030405ULL;
1567 		writeq(val64, &bar0->rx_w_round_robin_2);
1568 		val64 = 0x0001020304050001ULL;
1569 		writeq(val64, &bar0->rx_w_round_robin_3);
1570 		val64 = 0x0203040500000000ULL;
1571 		writeq(val64, &bar0->rx_w_round_robin_4);
1572 
1573 		val64 = 0x8080404020100804ULL;
1574 		writeq(val64, &bar0->rts_qos_steering);
1575 		break;
1576 	case 7:
1577 		val64 = 0x0001020304050600ULL;
1578 		writeq(val64, &bar0->rx_w_round_robin_0);
1579 		val64 = 0x0102030405060001ULL;
1580 		writeq(val64, &bar0->rx_w_round_robin_1);
1581 		val64 = 0x0203040506000102ULL;
1582 		writeq(val64, &bar0->rx_w_round_robin_2);
1583 		val64 = 0x0304050600010203ULL;
1584 		writeq(val64, &bar0->rx_w_round_robin_3);
1585 		val64 = 0x0405060000000000ULL;
1586 		writeq(val64, &bar0->rx_w_round_robin_4);
1587 
1588 		val64 = 0x8080402010080402ULL;
1589 		writeq(val64, &bar0->rts_qos_steering);
1590 		break;
1591 	case 8:
1592 		val64 = 0x0001020304050607ULL;
1593 		writeq(val64, &bar0->rx_w_round_robin_0);
1594 		writeq(val64, &bar0->rx_w_round_robin_1);
1595 		writeq(val64, &bar0->rx_w_round_robin_2);
1596 		writeq(val64, &bar0->rx_w_round_robin_3);
1597 		val64 = 0x0001020300000000ULL;
1598 		writeq(val64, &bar0->rx_w_round_robin_4);
1599 
1600 		val64 = 0x8040201008040201ULL;
1601 		writeq(val64, &bar0->rts_qos_steering);
1602 		break;
1603 	}
1604 
1605 	/* UDP Fix */
1606 	val64 = 0;
1607 	for (i = 0; i < 8; i++)
1608 		writeq(val64, &bar0->rts_frm_len_n[i]);
1609 
1610 	/* Set the default rts frame length for the rings configured */
1611 	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1612 	for (i = 0 ; i < config->rx_ring_num ; i++)
1613 		writeq(val64, &bar0->rts_frm_len_n[i]);
1614 
	/* Set the frame lengths desired by the user for the
	 * configured rings.
	 */
1618 	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that the user
		 * did not specify frame length steering.
		 * If the user provides a frame length then program
		 * the rts_frm_len register for those values, or else
		 * leave it as it is.
		 */
1625 		if (rts_frm_len[i] != 0) {
1626 			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1627 			       &bar0->rts_frm_len_n[i]);
1628 		}
1629 	}
1630 
1631 	/* Disable differentiated services steering logic */
1632 	for (i = 0; i < 64; i++) {
1633 		if (rts_ds_steer(nic, i, 0) == FAILURE) {
1634 			DBG_PRINT(ERR_DBG,
1635 				  "%s: rts_ds_steer failed on codepoint %d\n",
1636 				  dev->name, i);
1637 			return -ENODEV;
1638 		}
1639 	}
1640 
1641 	/* Program statistics memory */
1642 	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1643 
1644 	if (nic->device_type == XFRAME_II_DEVICE) {
1645 		val64 = STAT_BC(0x320);
1646 		writeq(val64, &bar0->stat_byte_cnt);
1647 	}
1648 
1649 	/*
1650 	 * Initializing the sampling rate for the device to calculate the
1651 	 * bandwidth utilization.
1652 	 */
1653 	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1654 		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1655 	writeq(val64, &bar0->mac_link_util);
1656 
1657 	/*
1658 	 * Initializing the Transmit and Receive Traffic Interrupt
1659 	 * Scheme.
1660 	 */
1661 
1662 	/* Initialize TTI */
1663 	if (SUCCESS != init_tti(nic, nic->last_link_state))
1664 		return -ENODEV;
1665 
1666 	/* RTI Initialization */
1667 	if (nic->device_type == XFRAME_II_DEVICE) {
		/*
		 * Programmed to generate approximately 500 interrupts
		 * per second.
		 */
1672 		int count = (nic->config.bus_speed * 125)/4;
1673 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1674 	} else
1675 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1676 	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1677 		RTI_DATA1_MEM_RX_URNG_B(0x10) |
1678 		RTI_DATA1_MEM_RX_URNG_C(0x30) |
1679 		RTI_DATA1_MEM_RX_TIMER_AC_EN;
1680 
1681 	writeq(val64, &bar0->rti_data1_mem);
1682 
1683 	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1684 		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1685 	if (nic->config.intr_type == MSI_X)
1686 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1687 			  RTI_DATA2_MEM_RX_UFC_D(0x40));
1688 	else
1689 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1690 			  RTI_DATA2_MEM_RX_UFC_D(0x80));
1691 	writeq(val64, &bar0->rti_data2_mem);
1692 
1693 	for (i = 0; i < config->rx_ring_num; i++) {
1694 		val64 = RTI_CMD_MEM_WE |
1695 			RTI_CMD_MEM_STROBE_NEW_CMD |
1696 			RTI_CMD_MEM_OFFSET(i);
1697 		writeq(val64, &bar0->rti_command_mem);
1698 
1699 		/*
1700 		 * Once the operation completes, the Strobe bit of the
1701 		 * command register will be reset. We poll for this
1702 		 * particular condition. We wait for a maximum of 500ms
1703 		 * for the operation to complete, if it's not complete
1704 		 * by then we return error.
1705 		 */
1706 		time = 0;
1707 		while (true) {
1708 			val64 = readq(&bar0->rti_command_mem);
1709 			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1710 				break;
1711 
1712 			if (time > 10) {
1713 				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1714 					  dev->name);
1715 				return -ENODEV;
1716 			}
1717 			time++;
1718 			msleep(50);
1719 		}
1720 	}
1721 
	/*
	 * Initializing proper Pause threshold values for all
	 * 8 Queues on the Rx side.
	 */
1726 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1727 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1728 
1729 	/* Disable RMAC PAD STRIPPING */
1730 	add = &bar0->mac_cfg;
1731 	val64 = readq(&bar0->mac_cfg);
1732 	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1733 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1734 	writel((u32) (val64), add);
1735 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1736 	writel((u32) (val64 >> 32), (add + 4));
1737 	val64 = readq(&bar0->mac_cfg);
1738 
1739 	/* Enable FCS stripping by adapter */
1740 	add = &bar0->mac_cfg;
1741 	val64 = readq(&bar0->mac_cfg);
1742 	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE) {
		writeq(val64, &bar0->mac_cfg);
	} else {
1746 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1747 		writel((u32) (val64), add);
1748 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1749 		writel((u32) (val64 >> 32), (add + 4));
1750 	}
1751 
1752 	/*
1753 	 * Set the time value to be inserted in the pause frame
1754 	 * generated by xena.
1755 	 */
1756 	val64 = readq(&bar0->rmac_pause_cfg);
1757 	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1758 	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1759 	writeq(val64, &bar0->rmac_pause_cfg);
1760 
1761 	/*
	 * Set the Threshold Limit for Generating the pause frame.
	 * If the amount of data in any queue exceeds the ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256, a
	 * pause frame is generated.
1766 	 */
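	/* Each queue gets a 16-bit field: 0xFF00 in the high byte and
	 * the per-queue threshold in the low byte, packed four queues
	 * to a 64-bit register.
	 */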
1767 	val64 = 0;
1768 	for (i = 0; i < 4; i++) {
1769 		val64 |= (((u64)0xFF00 |
1770 			   nic->mac_control.mc_pause_threshold_q0q3)
1771 			  << (i * 2 * 8));
1772 	}
1773 	writeq(val64, &bar0->mc_pause_thresh_q0q3);
1774 
1775 	val64 = 0;
1776 	for (i = 0; i < 4; i++) {
1777 		val64 |= (((u64)0xFF00 |
1778 			   nic->mac_control.mc_pause_threshold_q4q7)
1779 			  << (i * 2 * 8));
1780 	}
1781 	writeq(val64, &bar0->mc_pause_thresh_q4q7);
1782 
1783 	/*
	 * TxDMA will stop issuing read requests if the number of read
	 * splits exceeds the limit set by shared_splits.
1786 	 */
1787 	val64 = readq(&bar0->pic_control);
1788 	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1789 	writeq(val64, &bar0->pic_control);
1790 
1791 	if (nic->config.bus_speed == 266) {
1792 		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1793 		writeq(0x0, &bar0->read_retry_delay);
1794 		writeq(0x0, &bar0->write_retry_delay);
1795 	}
1796 
1797 	/*
1798 	 * Programming the Herc to split every write transaction
1799 	 * that does not start on an ADB to reduce disconnects.
1800 	 */
1801 	if (nic->device_type == XFRAME_II_DEVICE) {
1802 		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1803 			MISC_LINK_STABILITY_PRD(3);
1804 		writeq(val64, &bar0->misc_control);
1805 		val64 = readq(&bar0->pic_control2);
1806 		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1807 		writeq(val64, &bar0->pic_control2);
1808 	}
1809 	if (strstr(nic->product_name, "CX4")) {
1810 		val64 = TMAC_AVG_IPG(0x17);
1811 		writeq(val64, &bar0->tmac_avg_ipg);
1812 	}
1813 
1814 	return SUCCESS;
1815 }
1816 #define LINK_UP_DOWN_INTERRUPT		1
1817 #define MAC_RMAC_ERR_TIMER		2
1818 
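/* Xframe II signals link faults via a GPIO interrupt; on Xframe I the
 * link state is instead polled via the RMAC error timer.
 */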
1819 static int s2io_link_fault_indication(struct s2io_nic *nic)
1820 {
1821 	if (nic->device_type == XFRAME_II_DEVICE)
1822 		return LINK_UP_DOWN_INTERRUPT;
1823 	else
1824 		return MAC_RMAC_ERR_TIMER;
1825 }
1826 
1827 /**
 *  do_s2io_write_bits - update alarm bits in an alarm mask register
 *  @value: alarm bits
 *  @flag: ENABLE_INTRS to clear (unmask) the bits, otherwise set (mask) them
 *  @addr: alarm mask register address
 *  Description: update alarm bits in the alarm mask register
1833  *  Return Value:
1834  *  NONE.
1835  */
1836 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1837 {
1838 	u64 temp64;
1839 
1840 	temp64 = readq(addr);
1841 
1842 	if (flag == ENABLE_INTRS)
1843 		temp64 &= ~((u64)value);
1844 	else
1845 		temp64 |= ((u64)value);
1846 	writeq(temp64, addr);
1847 }
1848 
1849 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1850 {
1851 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1852 	register u64 gen_int_mask = 0;
1853 	u64 interruptible;
1854 
1855 	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1856 	if (mask & TX_DMA_INTR) {
1857 		gen_int_mask |= TXDMA_INT_M;
1858 
1859 		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1860 				   TXDMA_PCC_INT | TXDMA_TTI_INT |
1861 				   TXDMA_LSO_INT | TXDMA_TPA_INT |
1862 				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1863 
1864 		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1865 				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1866 				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1867 				   &bar0->pfc_err_mask);
1868 
1869 		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1870 				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1871 				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1872 
1873 		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1874 				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1875 				   PCC_N_SERR | PCC_6_COF_OV_ERR |
1876 				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1877 				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1878 				   PCC_TXB_ECC_SG_ERR,
1879 				   flag, &bar0->pcc_err_mask);
1880 
1881 		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1882 				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1883 
1884 		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1885 				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1886 				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1887 				   flag, &bar0->lso_err_mask);
1888 
1889 		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1890 				   flag, &bar0->tpa_err_mask);
1891 
1892 		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1893 	}
1894 
1895 	if (mask & TX_MAC_INTR) {
1896 		gen_int_mask |= TXMAC_INT_M;
1897 		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1898 				   &bar0->mac_int_mask);
1899 		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1900 				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1901 				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1902 				   flag, &bar0->mac_tmac_err_mask);
1903 	}
1904 
1905 	if (mask & TX_XGXS_INTR) {
1906 		gen_int_mask |= TXXGXS_INT_M;
1907 		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1908 				   &bar0->xgxs_int_mask);
1909 		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1910 				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1911 				   flag, &bar0->xgxs_txgxs_err_mask);
1912 	}
1913 
1914 	if (mask & RX_DMA_INTR) {
1915 		gen_int_mask |= RXDMA_INT_M;
1916 		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1917 				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1918 				   flag, &bar0->rxdma_int_mask);
1919 		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1920 				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1921 				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1922 				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1923 		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1924 				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1925 				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1926 				   &bar0->prc_pcix_err_mask);
1927 		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1928 				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1929 				   &bar0->rpa_err_mask);
1930 		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1931 				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1932 				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1933 				   RDA_FRM_ECC_SG_ERR |
1934 				   RDA_MISC_ERR|RDA_PCIX_ERR,
1935 				   flag, &bar0->rda_err_mask);
1936 		do_s2io_write_bits(RTI_SM_ERR_ALARM |
1937 				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1938 				   flag, &bar0->rti_err_mask);
1939 	}
1940 
1941 	if (mask & RX_MAC_INTR) {
1942 		gen_int_mask |= RXMAC_INT_M;
1943 		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1944 				   &bar0->mac_int_mask);
1945 		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1946 				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1947 				 RMAC_DOUBLE_ECC_ERR);
1948 		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
1949 			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
1950 		do_s2io_write_bits(interruptible,
1951 				   flag, &bar0->mac_rmac_err_mask);
1952 	}
1953 
1954 	if (mask & RX_XGXS_INTR) {
1955 		gen_int_mask |= RXXGXS_INT_M;
1956 		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1957 				   &bar0->xgxs_int_mask);
1958 		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1959 				   &bar0->xgxs_rxgxs_err_mask);
1960 	}
1961 
1962 	if (mask & MC_INTR) {
1963 		gen_int_mask |= MC_INT_M;
1964 		do_s2io_write_bits(MC_INT_MASK_MC_INT,
1965 				   flag, &bar0->mc_int_mask);
1966 		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1967 				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1968 				   &bar0->mc_err_mask);
1969 	}
1970 	nic->general_int_mask = gen_int_mask;
1971 
1972 	/* Remove this line when alarm interrupts are enabled */
1973 	nic->general_int_mask = 0;
1974 }
1975 
1976 /**
1977  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
 *  @nic: device private variable
 *  @mask: A mask indicating which Intr block must be modified
1980  *  @flag: A flag indicating whether to enable or disable the Intrs.
1981  *  Description: This function will either disable or enable the interrupts
1982  *  depending on the flag argument. The mask argument can be used to
1983  *  enable/disable any Intr block.
1984  *  Return Value: NONE.
1985  */
1986 
1987 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1988 {
1989 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1990 	register u64 temp64 = 0, intr_mask = 0;
1991 
1992 	intr_mask = nic->general_int_mask;
1993 
1994 	/*  Top level interrupt classification */
1995 	/*  PIC Interrupts */
1996 	if (mask & TX_PIC_INTR) {
1997 		/*  Enable PIC Intrs in the general intr mask register */
1998 		intr_mask |= TXPIC_INT_M;
1999 		if (flag == ENABLE_INTRS) {
2000 			/*
			 * If this is a Hercules adapter, enable the GPIO
			 * interrupt; otherwise disable all PCIX, Flash,
			 * MDIO, IIC and GPIO interrupts for now.
2004 			 * TODO
2005 			 */
2006 			if (s2io_link_fault_indication(nic) ==
2007 			    LINK_UP_DOWN_INTERRUPT) {
2008 				do_s2io_write_bits(PIC_INT_GPIO, flag,
2009 						   &bar0->pic_int_mask);
2010 				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2011 						   &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
2014 		} else if (flag == DISABLE_INTRS) {
2015 			/*
2016 			 * Disable PIC Intrs in the general
2017 			 * intr mask register
2018 			 */
2019 			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2020 		}
2021 	}
2022 
2023 	/*  Tx traffic interrupts */
2024 	if (mask & TX_TRAFFIC_INTR) {
2025 		intr_mask |= TXTRAFFIC_INT_M;
2026 		if (flag == ENABLE_INTRS) {
2027 			/*
			 * Enable all the Tx side interrupts.
			 * Writing 0 enables all 64 TX interrupt levels.
2030 			 */
2031 			writeq(0x0, &bar0->tx_traffic_mask);
2032 		} else if (flag == DISABLE_INTRS) {
2033 			/*
2034 			 * Disable Tx Traffic Intrs in the general intr mask
2035 			 * register.
2036 			 */
2037 			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2038 		}
2039 	}
2040 
2041 	/*  Rx traffic interrupts */
2042 	if (mask & RX_TRAFFIC_INTR) {
2043 		intr_mask |= RXTRAFFIC_INT_M;
2044 		if (flag == ENABLE_INTRS) {
			/* Writing 0 enables all 8 RX interrupt levels */
2046 			writeq(0x0, &bar0->rx_traffic_mask);
2047 		} else if (flag == DISABLE_INTRS) {
2048 			/*
2049 			 * Disable Rx Traffic Intrs in the general intr mask
2050 			 * register.
2051 			 */
2052 			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2053 		}
2054 	}
2055 
2056 	temp64 = readq(&bar0->general_int_mask);
2057 	if (flag == ENABLE_INTRS)
2058 		temp64 &= ~((u64)intr_mask);
2059 	else
2060 		temp64 = DISABLE_ALL_INTRS;
2061 	writeq(temp64, &bar0->general_int_mask);
2062 
2063 	nic->general_int_mask = readq(&bar0->general_int_mask);
2064 }
2065 
2066 /**
 *  verify_pcc_quiescent - Checks for PCC quiescent state
 *  @sp: private member of the device structure
 *  @flag: indicates whether the adapter enable bit was written
 *  Return: 1 if PCC is quiescent
 *          0 if PCC is not quiescent
2070  */
2071 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2072 {
2073 	int ret = 0, herc;
2074 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2075 	u64 val64 = readq(&bar0->adapter_status);
2076 
2077 	herc = (sp->device_type == XFRAME_II_DEVICE);
2078 
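	/* Xframe II (Herc) and Xframe I revisions >= 4 expose a single
	 * combined PCC idle bit; older Xframe I revisions report four
	 * separate PCC idle bits (RMAC_PCC_FOUR_IDLE).
	 */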
2079 	if (flag == false) {
2080 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2081 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2082 				ret = 1;
2083 		} else {
2084 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2085 				ret = 1;
2086 		}
2087 	} else {
2088 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2089 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2090 			     ADAPTER_STATUS_RMAC_PCC_IDLE))
2091 				ret = 1;
2092 		} else {
2093 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2094 			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2095 				ret = 1;
2096 		}
2097 	}
2098 
2099 	return ret;
2100 }

/**
 *  verify_xena_quiescence - Checks whether the H/W is ready
 *  @sp: private member of the device structure
 *  Description: Returns whether the H/W is ready to go or not by
 *  checking the individual ready/quiescent bits in the adapter
 *  status register.
 *  Return: 1 if Xena is quiescent
 *          0 if Xena is not quiescent
2109  */
2110 
2111 static int verify_xena_quiescence(struct s2io_nic *sp)
2112 {
2113 	int  mode;
2114 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2115 	u64 val64 = readq(&bar0->adapter_status);
2116 	mode = s2io_verify_pci_mode(sp);
2117 
2118 	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2119 		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2120 		return 0;
2121 	}
2122 	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2123 		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2124 		return 0;
2125 	}
2126 	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2127 		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2128 		return 0;
2129 	}
2130 	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2131 		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2132 		return 0;
2133 	}
2134 	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2135 		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2136 		return 0;
2137 	}
2138 	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2139 		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2140 		return 0;
2141 	}
2142 	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2143 		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2144 		return 0;
2145 	}
2146 	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2147 		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2148 		return 0;
2149 	}
2150 
2151 	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore
	 * the P_PLL_LOCK bit in the adapter_status register will
2154 	 * not be asserted.
2155 	 */
2156 	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2157 	    sp->device_type == XFRAME_II_DEVICE &&
2158 	    mode != PCI_MODE_PCI_33) {
2159 		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2160 		return 0;
2161 	}
2162 	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2163 	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2164 		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2165 		return 0;
2166 	}
2167 	return 1;
2168 }
2169 
2170 /**
 * fix_mac_address - Fix for MAC addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
 * Description :
 * New procedure to clear MAC address reading problems on Alpha platforms
2175  *
2176  */
2177 
2178 static void fix_mac_address(struct s2io_nic *sp)
2179 {
2180 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2181 	int i = 0;
2182 
2183 	while (fix_mac[i] != END_SIGN) {
2184 		writeq(fix_mac[i++], &bar0->gpio_control);
2185 		udelay(10);
2186 		(void) readq(&bar0->gpio_control);
2187 	}
2188 }
2189 
2190 /**
2191  *  start_nic - Turns the device on
2192  *  @nic : device private variable.
2193  *  Description:
 *  This function actually turns the device on. Before this function is
 *  called, all registers are configured from their reset states
2196  *  and shared memory is allocated but the NIC is still quiescent. On
2197  *  calling this function, the device interrupts are cleared and the NIC is
2198  *  literally switched on by writing into the adapter control register.
2199  *  Return Value:
 *  SUCCESS on success and FAILURE on failure.
2201  */
2202 
2203 static int start_nic(struct s2io_nic *nic)
2204 {
2205 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2206 	struct net_device *dev = nic->dev;
2207 	register u64 val64 = 0;
2208 	u16 subid, i;
2209 	struct config_param *config = &nic->config;
2210 	struct mac_info *mac_control = &nic->mac_control;
2211 
2212 	/*  PRC Initialization and configuration */
2213 	for (i = 0; i < config->rx_ring_num; i++) {
2214 		struct ring_info *ring = &mac_control->rings[i];
2215 
2216 		writeq((u64)ring->rx_blocks[0].block_dma_addr,
2217 		       &bar0->prc_rxd0_n[i]);
2218 
2219 		val64 = readq(&bar0->prc_ctrl_n[i]);
2220 		if (nic->rxd_mode == RXD_MODE_1)
2221 			val64 |= PRC_CTRL_RC_ENABLED;
2222 		else
2223 			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2224 		if (nic->device_type == XFRAME_II_DEVICE)
2225 			val64 |= PRC_CTRL_GROUP_READS;
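		/* Clear and reprogram the RxD backoff interval field */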
2226 		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2227 		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2228 		writeq(val64, &bar0->prc_ctrl_n[i]);
2229 	}
2230 
2231 	if (nic->rxd_mode == RXD_MODE_3B) {
		/* In 2 buffer mode, ignore L2 errors by writing into the Rx_pa_cfg reg. */
2233 		val64 = readq(&bar0->rx_pa_cfg);
2234 		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2235 		writeq(val64, &bar0->rx_pa_cfg);
2236 	}
2237 
2238 	if (vlan_tag_strip == 0) {
2239 		val64 = readq(&bar0->rx_pa_cfg);
2240 		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2241 		writeq(val64, &bar0->rx_pa_cfg);
2242 		nic->vlan_strip_flag = 0;
2243 	}
2244 
2245 	/*
	 * Enabling MC-RLDRAM. After enabling the device, we wait
	 * around 100 ms, which is approximately the time required
2248 	 * for the device to be ready for operation.
2249 	 */
2250 	val64 = readq(&bar0->mc_rldram_mrs);
2251 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2252 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2253 	val64 = readq(&bar0->mc_rldram_mrs);
2254 
2255 	msleep(100);	/* Delay by around 100 ms. */
2256 
2257 	/* Enabling ECC Protection. */
2258 	val64 = readq(&bar0->adapter_control);
2259 	val64 &= ~ADAPTER_ECC_EN;
2260 	writeq(val64, &bar0->adapter_control);
2261 
2262 	/*
	 * Verify that the device is ready to be enabled and, if so,
	 * enable it.
2265 	 */
2266 	val64 = readq(&bar0->adapter_status);
2267 	if (!verify_xena_quiescence(nic)) {
2268 		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2269 			  "Adapter status reads: 0x%llx\n",
2270 			  dev->name, (unsigned long long)val64);
2271 		return FAILURE;
2272 	}
2273 
2274 	/*
	 * With some switches, link might already be up at this point.
	 * Because of this weird behavior, when we enable the laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving, so we are forced to
2279 	 * make a global change.
2280 	 */
2281 
2282 	/* Enabling Laser. */
2283 	val64 = readq(&bar0->adapter_control);
2284 	val64 |= ADAPTER_EOI_TX_ON;
2285 	writeq(val64, &bar0->adapter_control);
2286 
2287 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2288 		/*
		 * We don't see link state interrupts initially on some
		 * switches, so directly schedule the link state task here.
2291 		 */
2292 		schedule_work(&nic->set_link_task);
2293 	}
2294 	/* SXE-002: Initialize link and activity LED */
2295 	subid = nic->pdev->subsystem_device;
2296 	if (((subid & 0xFF) >= 0x07) &&
2297 	    (nic->device_type == XFRAME_I_DEVICE)) {
2298 		val64 = readq(&bar0->gpio_control);
2299 		val64 |= 0x0000800000000000ULL;
2300 		writeq(val64, &bar0->gpio_control);
2301 		val64 = 0x0411040400000000ULL;
2302 		writeq(val64, (void __iomem *)bar0 + 0x2700);
2303 	}
2304 
2305 	return SUCCESS;
2306 }

/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo to which the TxD list belongs
 * @txdlp: pointer to the first TxD in the TxD list
 * @get_off: offset of the TxD list within the fifo
 */
2310 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2311 					struct TxD *txdlp, int get_off)
2312 {
2313 	struct s2io_nic *nic = fifo_data->nic;
2314 	struct sk_buff *skb;
2315 	struct TxD *txds;
2316 	u16 j, frg_cnt;
2317 
2318 	txds = txdlp;
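	/* The first TxD may carry the UFO in-band header rather than
	 * frame data; if so, unmap it and step to the next descriptor.
	 */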
2319 	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2320 		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2321 				 sizeof(u64), PCI_DMA_TODEVICE);
2322 		txds++;
2323 	}
2324 
2325 	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2326 	if (!skb) {
2327 		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2328 		return NULL;
2329 	}
2330 	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2331 			 skb_headlen(skb), PCI_DMA_TODEVICE);
2332 	frg_cnt = skb_shinfo(skb)->nr_frags;
2333 	if (frg_cnt) {
2334 		txds++;
2335 		for (j = 0; j < frg_cnt; j++, txds++) {
2336 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2337 			if (!txds->Buffer_Pointer)
2338 				break;
2339 			pci_unmap_page(nic->pdev,
2340 				       (dma_addr_t)txds->Buffer_Pointer,
2341 				       skb_frag_size(frag), PCI_DMA_TODEVICE);
2342 		}
2343 	}
2344 	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2345 	return skb;
2346 }
2347 
2348 /**
2349  *  free_tx_buffers - Free all queued Tx buffers
2350  *  @nic : device private variable.
2351  *  Description:
2352  *  Free all queued Tx buffers.
2353  *  Return Value: void
2354  */
2355 
2356 static void free_tx_buffers(struct s2io_nic *nic)
2357 {
2358 	struct net_device *dev = nic->dev;
2359 	struct sk_buff *skb;
2360 	struct TxD *txdp;
2361 	int i, j;
2362 	int cnt = 0;
2363 	struct config_param *config = &nic->config;
2364 	struct mac_info *mac_control = &nic->mac_control;
2365 	struct stat_block *stats = mac_control->stats_info;
2366 	struct swStat *swstats = &stats->sw_stat;
2367 
2368 	for (i = 0; i < config->tx_fifo_num; i++) {
2369 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2370 		struct fifo_info *fifo = &mac_control->fifos[i];
2371 		unsigned long flags;
2372 
2373 		spin_lock_irqsave(&fifo->tx_lock, flags);
2374 		for (j = 0; j < tx_cfg->fifo_len; j++) {
2375 			txdp = fifo->list_info[j].list_virt_addr;
2376 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2377 			if (skb) {
2378 				swstats->mem_freed += skb->truesize;
2379 				dev_kfree_skb(skb);
2380 				cnt++;
2381 			}
2382 		}
2383 		DBG_PRINT(INTR_DBG,
2384 			  "%s: forcibly freeing %d skbs on FIFO%d\n",
2385 			  dev->name, cnt, i);
2386 		fifo->tx_curr_get_info.offset = 0;
2387 		fifo->tx_curr_put_info.offset = 0;
2388 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
2389 	}
2390 }
2391 
2392 /**
 *   stop_nic - To stop the nic
 *   @nic : device private variable.
2395  *   Description:
2396  *   This function does exactly the opposite of what the start_nic()
2397  *   function does. This function is called to stop the device.
2398  *   Return Value:
2399  *   void.
2400  */
2401 
2402 static void stop_nic(struct s2io_nic *nic)
2403 {
2404 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2405 	register u64 val64 = 0;
2406 	u16 interruptible;
2407 
2408 	/*  Disable all interrupts */
2409 	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2410 	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2411 	interruptible |= TX_PIC_INTR;
2412 	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2413 
2414 	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2415 	val64 = readq(&bar0->adapter_control);
2416 	val64 &= ~(ADAPTER_CNTL_EN);
2417 	writeq(val64, &bar0->adapter_control);
2418 }
2419 
2420 /**
2421  *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic: device private variable
 *  @ring: per ring structure
2423  *  @from_card_up: If this is true, we will map the buffer to get
2424  *     the dma address for buf0 and buf1 to give it to the card.
2425  *     Else we will sync the already mapped buffer to give it to the card.
2426  *  Description:
2427  *  The function allocates Rx side skbs and puts the physical
2428  *  address of these buffers into the RxD buffer pointers, so that the NIC
2429  *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 header and
 *  L4 payload in three buffer mode, and in five buffer mode the L4
 *  payload itself is split into 3 fragments. As of now the driver
 *  supports only the single buffer and two buffer (RXD_MODE_3B) modes.
2439  *   Return Value:
2440  *  SUCCESS on success or an appropriate -ve value on failure.
2441  */
2442 static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2443 			   int from_card_up)
2444 {
2445 	struct sk_buff *skb;
2446 	struct RxD_t *rxdp;
2447 	int off, size, block_no, block_no1;
2448 	u32 alloc_tab = 0;
2449 	u32 alloc_cnt;
2450 	u64 tmp;
2451 	struct buffAdd *ba;
2452 	struct RxD_t *first_rxdp = NULL;
2453 	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2454 	struct RxD1 *rxdp1;
2455 	struct RxD3 *rxdp3;
2456 	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2457 
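	/* Number of RxDs that can be replenished on this pass */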
2458 	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2459 
2460 	block_no1 = ring->rx_curr_get_info.block_index;
2461 	while (alloc_tab < alloc_cnt) {
2462 		block_no = ring->rx_curr_put_info.block_index;
2463 
2464 		off = ring->rx_curr_put_info.offset;
2465 
2466 		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2467 
2468 		if ((block_no == block_no1) &&
2469 		    (off == ring->rx_curr_get_info.offset) &&
2470 		    (rxdp->Host_Control)) {
2471 			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2472 				  ring->dev->name);
2473 			goto end;
2474 		}
2475 		if (off && (off == ring->rxd_count)) {
2476 			ring->rx_curr_put_info.block_index++;
2477 			if (ring->rx_curr_put_info.block_index ==
2478 			    ring->block_count)
2479 				ring->rx_curr_put_info.block_index = 0;
2480 			block_no = ring->rx_curr_put_info.block_index;
2481 			off = 0;
2482 			ring->rx_curr_put_info.offset = off;
2483 			rxdp = ring->rx_blocks[block_no].block_virt_addr;
2484 			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2485 				  ring->dev->name, rxdp);
2486 
2487 		}
2488 
2489 		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2490 		    ((ring->rxd_mode == RXD_MODE_3B) &&
2491 		     (rxdp->Control_2 & s2BIT(0)))) {
2492 			ring->rx_curr_put_info.offset = off;
2493 			goto end;
2494 		}
2495 		/* calculate size of skb based on ring mode */
2496 		size = ring->mtu +
2497 			HEADER_ETHERNET_II_802_3_SIZE +
2498 			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2499 		if (ring->rxd_mode == RXD_MODE_1)
2500 			size += NET_IP_ALIGN;
2501 		else
2502 			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2503 
2504 		/* allocate skb */
2505 		skb = netdev_alloc_skb(nic->dev, size);
2506 		if (!skb) {
2507 			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2508 				  ring->dev->name);
2509 			if (first_rxdp) {
2510 				dma_wmb();
2511 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2512 			}
2513 			swstats->mem_alloc_fail_cnt++;
2514 
			return -ENOMEM;
2516 		}
2517 		swstats->mem_allocated += skb->truesize;
2518 
2519 		if (ring->rxd_mode == RXD_MODE_1) {
2520 			/* 1 buffer mode - normal operation mode */
2521 			rxdp1 = (struct RxD1 *)rxdp;
2522 			memset(rxdp, 0, sizeof(struct RxD1));
2523 			skb_reserve(skb, NET_IP_ALIGN);
2524 			rxdp1->Buffer0_ptr =
2525 				pci_map_single(ring->pdev, skb->data,
2526 					       size - NET_IP_ALIGN,
2527 					       PCI_DMA_FROMDEVICE);
2528 			if (pci_dma_mapping_error(nic->pdev,
2529 						  rxdp1->Buffer0_ptr))
2530 				goto pci_map_failed;
2531 
2532 			rxdp->Control_2 =
2533 				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2534 			rxdp->Host_Control = (unsigned long)skb;
2535 		} else if (ring->rxd_mode == RXD_MODE_3B) {
2536 			/*
			 * 2 buffer mode provides 128-byte aligned
			 * receive buffers.
2540 			 */
2541 
2542 			rxdp3 = (struct RxD3 *)rxdp;
2543 			/* save buffer pointers to avoid frequent dma mapping */
2544 			Buffer0_ptr = rxdp3->Buffer0_ptr;
2545 			Buffer1_ptr = rxdp3->Buffer1_ptr;
2546 			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync */
2548 			rxdp3->Buffer0_ptr = Buffer0_ptr;
2549 			rxdp3->Buffer1_ptr = Buffer1_ptr;
2550 
2551 			ba = &ring->ba[block_no][off];
2552 			skb_reserve(skb, BUF0_LEN);
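			/* Round skb->data up to the next alignment
			 * boundary; ALIGN_SIZE is assumed to be a
			 * power-of-two-minus-one mask (e.g. 127 for
			 * 128-byte alignment).
			 */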
2553 			tmp = (u64)(unsigned long)skb->data;
2554 			tmp += ALIGN_SIZE;
2555 			tmp &= ~ALIGN_SIZE;
2556 			skb->data = (void *) (unsigned long)tmp;
2557 			skb_reset_tail_pointer(skb);
2558 
2559 			if (from_card_up) {
2560 				rxdp3->Buffer0_ptr =
2561 					pci_map_single(ring->pdev, ba->ba_0,
2562 						       BUF0_LEN,
2563 						       PCI_DMA_FROMDEVICE);
2564 				if (pci_dma_mapping_error(nic->pdev,
2565 							  rxdp3->Buffer0_ptr))
2566 					goto pci_map_failed;
			} else {
				pci_dma_sync_single_for_device(ring->pdev,
							       (dma_addr_t)rxdp3->Buffer0_ptr,
							       BUF0_LEN,
							       PCI_DMA_FROMDEVICE);
			}
2572 
2573 			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2574 			if (ring->rxd_mode == RXD_MODE_3B) {
2575 				/* Two buffer mode */
2576 
2577 				/*
2578 				 * Buffer2 will have L3/L4 header plus
2579 				 * L4 payload
2580 				 */
2581 				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
2582 								    skb->data,
2583 								    ring->mtu + 4,
2584 								    PCI_DMA_FROMDEVICE);
2585 
2586 				if (pci_dma_mapping_error(nic->pdev,
2587 							  rxdp3->Buffer2_ptr))
2588 					goto pci_map_failed;
2589 
2590 				if (from_card_up) {
2591 					rxdp3->Buffer1_ptr =
2592 						pci_map_single(ring->pdev,
2593 							       ba->ba_1,
2594 							       BUF1_LEN,
2595 							       PCI_DMA_FROMDEVICE);
2596 
2597 					if (pci_dma_mapping_error(nic->pdev,
2598 								  rxdp3->Buffer1_ptr)) {
2599 						pci_unmap_single(ring->pdev,
2600 								 (dma_addr_t)(unsigned long)
2601 								 skb->data,
2602 								 ring->mtu + 4,
2603 								 PCI_DMA_FROMDEVICE);
2604 						goto pci_map_failed;
2605 					}
2606 				}
2607 				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2608 				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2609 					(ring->mtu + 4);
2610 			}
2611 			rxdp->Control_2 |= s2BIT(0);
2612 			rxdp->Host_Control = (unsigned long) (skb);
2613 		}
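		/* Hand ownership of all but the first RxD of each
		 * 2^rxsync_frequency batch to the adapter immediately;
		 * the first one is deferred until after the dma_wmb()
		 * barrier below.
		 */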
2614 		if (alloc_tab & ((1 << rxsync_frequency) - 1))
2615 			rxdp->Control_1 |= RXD_OWN_XENA;
2616 		off++;
2617 		if (off == (ring->rxd_count + 1))
2618 			off = 0;
2619 		ring->rx_curr_put_info.offset = off;
2620 
2621 		rxdp->Control_2 |= SET_RXD_MARKER;
2622 		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2623 			if (first_rxdp) {
2624 				dma_wmb();
2625 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2626 			}
2627 			first_rxdp = rxdp;
2628 		}
2629 		ring->rx_bufs_left += 1;
2630 		alloc_tab++;
2631 	}
2632 
2633 end:
2634 	/* Transfer ownership of first descriptor to adapter just before
2635 	 * exiting. Before that, use memory barrier so that ownership
2636 	 * and other fields are seen by adapter correctly.
2637 	 */
2638 	if (first_rxdp) {
2639 		dma_wmb();
2640 		first_rxdp->Control_1 |= RXD_OWN_XENA;
2641 	}
2642 
2643 	return SUCCESS;
2644 
2645 pci_map_failed:
2646 	swstats->pci_map_fail_cnt++;
2647 	swstats->mem_freed += skb->truesize;
2648 	dev_kfree_skb_irq(skb);
2649 	return -ENOMEM;
2650 }
2651 
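/* Unmap and free every skb attached to RxD block @blk of ring @ring_no */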
2652 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2653 {
2654 	struct net_device *dev = sp->dev;
2655 	int j;
2656 	struct sk_buff *skb;
2657 	struct RxD_t *rxdp;
2658 	struct RxD1 *rxdp1;
2659 	struct RxD3 *rxdp3;
2660 	struct mac_info *mac_control = &sp->mac_control;
2661 	struct stat_block *stats = mac_control->stats_info;
2662 	struct swStat *swstats = &stats->sw_stat;
2663 
2664 	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2665 		rxdp = mac_control->rings[ring_no].
2666 			rx_blocks[blk].rxds[j].virt_addr;
2667 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2668 		if (!skb)
2669 			continue;
2670 		if (sp->rxd_mode == RXD_MODE_1) {
2671 			rxdp1 = (struct RxD1 *)rxdp;
2672 			pci_unmap_single(sp->pdev,
2673 					 (dma_addr_t)rxdp1->Buffer0_ptr,
2674 					 dev->mtu +
2675 					 HEADER_ETHERNET_II_802_3_SIZE +
2676 					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2677 					 PCI_DMA_FROMDEVICE);
2678 			memset(rxdp, 0, sizeof(struct RxD1));
2679 		} else if (sp->rxd_mode == RXD_MODE_3B) {
2680 			rxdp3 = (struct RxD3 *)rxdp;
2681 			pci_unmap_single(sp->pdev,
2682 					 (dma_addr_t)rxdp3->Buffer0_ptr,
2683 					 BUF0_LEN,
2684 					 PCI_DMA_FROMDEVICE);
2685 			pci_unmap_single(sp->pdev,
2686 					 (dma_addr_t)rxdp3->Buffer1_ptr,
2687 					 BUF1_LEN,
2688 					 PCI_DMA_FROMDEVICE);
2689 			pci_unmap_single(sp->pdev,
2690 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2691 					 dev->mtu + 4,
2692 					 PCI_DMA_FROMDEVICE);
2693 			memset(rxdp, 0, sizeof(struct RxD3));
2694 		}
2695 		swstats->mem_freed += skb->truesize;
2696 		dev_kfree_skb(skb);
2697 		mac_control->rings[ring_no].rx_bufs_left -= 1;
2698 	}
2699 }
2700 
2701 /**
2702  *  free_rx_buffers - Frees all Rx buffers
2703  *  @sp: device private variable.
2704  *  Description:
2705  *  This function will free all Rx buffers allocated by host.
2706  *  Return Value:
2707  *  NONE.
2708  */
2709 
2710 static void free_rx_buffers(struct s2io_nic *sp)
2711 {
2712 	struct net_device *dev = sp->dev;
2713 	int i, blk = 0, buf_cnt = 0;
2714 	struct config_param *config = &sp->config;
2715 	struct mac_info *mac_control = &sp->mac_control;
2716 
2717 	for (i = 0; i < config->rx_ring_num; i++) {
2718 		struct ring_info *ring = &mac_control->rings[i];
2719 
2720 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2721 			free_rxd_blk(sp, i, blk);
2722 
2723 		ring->rx_curr_put_info.block_index = 0;
2724 		ring->rx_curr_get_info.block_index = 0;
2725 		ring->rx_curr_put_info.offset = 0;
2726 		ring->rx_curr_get_info.offset = 0;
2727 		ring->rx_bufs_left = 0;
2728 		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2729 			  dev->name, buf_cnt, i);
2730 	}
2731 }
2732 
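/* Replenish Rx buffers for the given ring; allocation failures are
 * logged and left to be retried on a later replenish pass.
 */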
2733 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2734 {
2735 	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2736 		DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2737 			  ring->dev->name);
2738 	}
2739 	return 0;
2740 }
2741 
2742 /**
 * s2io_poll_msix - Rx interrupt handler for NAPI support
 * @napi : pointer to the napi structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt
 * context. It will also process only a given number of packets.
 * Return value:
 * Number of packets processed.
2753  */
2754 
2755 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2756 {
2757 	struct ring_info *ring = container_of(napi, struct ring_info, napi);
2758 	struct net_device *dev = ring->dev;
2759 	int pkts_processed = 0;
2760 	u8 __iomem *addr = NULL;
2761 	u8 val8 = 0;
2762 	struct s2io_nic *nic = netdev_priv(dev);
2763 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2764 	int budget_org = budget;
2765 
2766 	if (unlikely(!is_s2io_card_up(nic)))
2767 		return 0;
2768 
2769 	pkts_processed = rx_intr_handler(ring, budget);
2770 	s2io_chk_rx_buffers(nic, ring);
2771 
2772 	if (pkts_processed < budget_org) {
2773 		napi_complete_done(napi, pkts_processed);
		/* Re-enable the MSI-X Rx vector */
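		/* Each ring appears to own one byte of xmsi_mask_reg
		 * (hence the 7 - ring_no offset); the values written
		 * below clear the mask for this ring's vector.
		 */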
2775 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2776 		addr += 7 - ring->ring_no;
2777 		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2778 		writeb(val8, addr);
2779 		val8 = readb(addr);
2780 	}
2781 	return pkts_processed;
2782 }
2783 
2784 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2785 {
2786 	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2787 	int pkts_processed = 0;
2788 	int ring_pkts_processed, i;
2789 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2790 	int budget_org = budget;
2791 	struct config_param *config = &nic->config;
2792 	struct mac_info *mac_control = &nic->mac_control;
2793 
2794 	if (unlikely(!is_s2io_card_up(nic)))
2795 		return 0;
2796 
2797 	for (i = 0; i < config->rx_ring_num; i++) {
2798 		struct ring_info *ring = &mac_control->rings[i];
2799 		ring_pkts_processed = rx_intr_handler(ring, budget);
2800 		s2io_chk_rx_buffers(nic, ring);
2801 		pkts_processed += ring_pkts_processed;
2802 		budget -= ring_pkts_processed;
2803 		if (budget <= 0)
2804 			break;
2805 	}
2806 	if (pkts_processed < budget_org) {
2807 		napi_complete_done(napi, pkts_processed);
		/* Re-enable Rx traffic interrupts */
2809 		writeq(0, &bar0->rx_traffic_mask);
2810 		readl(&bar0->rx_traffic_mask);
2811 	}
2812 	return pkts_processed;
2813 }
2814 
2815 #ifdef CONFIG_NET_POLL_CONTROLLER
2816 /**
2817  * s2io_netpoll - netpoll event handler entry point
2818  * @dev : pointer to the device structure.
2819  * Description:
 * This function will be called by the upper layer to check for events on
 * the interface in situations where interrupts are disabled. It is used
 * for specific in-kernel networking tasks, such as remote consoles and
 * kernel debugging over the network (for example netdump in Red Hat).
2824  */
2825 static void s2io_netpoll(struct net_device *dev)
2826 {
2827 	struct s2io_nic *nic = netdev_priv(dev);
2828 	const int irq = nic->pdev->irq;
2829 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2830 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2831 	int i;
2832 	struct config_param *config = &nic->config;
2833 	struct mac_info *mac_control = &nic->mac_control;
2834 
2835 	if (pci_channel_offline(nic->pdev))
2836 		return;
2837 
2838 	disable_irq(irq);
2839 
2840 	writeq(val64, &bar0->rx_traffic_int);
2841 	writeq(val64, &bar0->tx_traffic_int);
2842 
	/* We need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and fail, and eventually netpoll applications
	 * such as netdump will fail.
2846 	 */
2847 	for (i = 0; i < config->tx_fifo_num; i++)
2848 		tx_intr_handler(&mac_control->fifos[i]);
2849 
2850 	/* check for received packet and indicate up to network */
2851 	for (i = 0; i < config->rx_ring_num; i++) {
2852 		struct ring_info *ring = &mac_control->rings[i];
2853 
2854 		rx_intr_handler(ring, 0);
2855 	}
2856 
2857 	for (i = 0; i < config->rx_ring_num; i++) {
2858 		struct ring_info *ring = &mac_control->rings[i];
2859 
2860 		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2861 			DBG_PRINT(INFO_DBG,
2862 				  "%s: Out of memory in Rx Netpoll!!\n",
2863 				  dev->name);
2864 			break;
2865 		}
2866 	}
2867 	enable_irq(irq);
2868 }
2869 #endif
2870 
2871 /**
2872  *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per ring structure.
 *  @budget: budget for napi processing.
 *  Description:
 *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh, as yet un-processed frames, this function
 *  is called. It picks out the RxD at which the last Rx processing had
 *  stopped, sends the skb to the OSM's Rx handler and then increments
 *  the offset.
2881  *  Return Value:
2882  *  No. of napi packets processed.
2883  */
2884 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2885 {
2886 	int get_block, put_block;
2887 	struct rx_curr_get_info get_info, put_info;
2888 	struct RxD_t *rxdp;
2889 	struct sk_buff *skb;
2890 	int pkt_cnt = 0, napi_pkts = 0;
2891 	int i;
2892 	struct RxD1 *rxdp1;
2893 	struct RxD3 *rxdp3;
2894 
2895 	if (budget <= 0)
2896 		return napi_pkts;
2897 
2898 	get_info = ring_data->rx_curr_get_info;
2899 	get_block = get_info.block_index;
2900 	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2901 	put_block = put_info.block_index;
2902 	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2903 
2904 	while (RXD_IS_UP2DT(rxdp)) {
2905 		/*
		 * If we are next to the put index then it's a
		 * FIFO full condition.
2908 		 */
2909 		if ((get_block == put_block) &&
2910 		    (get_info.offset + 1) == put_info.offset) {
2911 			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2912 				  ring_data->dev->name);
2913 			break;
2914 		}
2915 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2916 		if (skb == NULL) {
2917 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2918 				  ring_data->dev->name);
2919 			return 0;
2920 		}
2921 		if (ring_data->rxd_mode == RXD_MODE_1) {
2922 			rxdp1 = (struct RxD1 *)rxdp;
2923 			pci_unmap_single(ring_data->pdev, (dma_addr_t)
2924 					 rxdp1->Buffer0_ptr,
2925 					 ring_data->mtu +
2926 					 HEADER_ETHERNET_II_802_3_SIZE +
2927 					 HEADER_802_2_SIZE +
2928 					 HEADER_SNAP_SIZE,
2929 					 PCI_DMA_FROMDEVICE);
2930 		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
2931 			rxdp3 = (struct RxD3 *)rxdp;
2932 			pci_dma_sync_single_for_cpu(ring_data->pdev,
2933 						    (dma_addr_t)rxdp3->Buffer0_ptr,
2934 						    BUF0_LEN,
2935 						    PCI_DMA_FROMDEVICE);
2936 			pci_unmap_single(ring_data->pdev,
2937 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2938 					 ring_data->mtu + 4,
2939 					 PCI_DMA_FROMDEVICE);
2940 		}
2941 		prefetch(skb->data);
2942 		rx_osm_handler(ring_data, rxdp);
2943 		get_info.offset++;
2944 		ring_data->rx_curr_get_info.offset = get_info.offset;
2945 		rxdp = ring_data->rx_blocks[get_block].
2946 			rxds[get_info.offset].virt_addr;
2947 		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2948 			get_info.offset = 0;
2949 			ring_data->rx_curr_get_info.offset = get_info.offset;
2950 			get_block++;
2951 			if (get_block == ring_data->block_count)
2952 				get_block = 0;
2953 			ring_data->rx_curr_get_info.block_index = get_block;
2954 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2955 		}
2956 
2957 		if (ring_data->nic->config.napi) {
2958 			budget--;
2959 			napi_pkts++;
2960 			if (!budget)
2961 				break;
2962 		}
2963 		pkt_cnt++;
2964 		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2965 			break;
2966 	}
2967 	if (ring_data->lro) {
2968 		/* Clear all LRO sessions before exiting */
2969 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2970 			struct lro *lro = &ring_data->lro0_n[i];
2971 			if (lro->in_use) {
2972 				update_L3L4_header(ring_data->nic, lro);
2973 				queue_rx_frame(lro->parent, lro->vlan_tag);
2974 				clear_lro_session(lro);
2975 			}
2976 		}
2977 	}
2978 	return napi_pkts;
2979 }
2980 
2981 /**
2982  *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data : fifo data structure of the fifo being serviced
 *  Description:
 *  If an interrupt was raised to indicate DMA complete of the
 *  Tx packet, this function is called. It identifies the last TxD
 *  whose buffer was freed and frees all skbs whose data have already
 *  been DMA'ed into the NIC's internal memory.
2989  *  Return Value:
2990  *  NONE
2991  */
2992 
2993 static void tx_intr_handler(struct fifo_info *fifo_data)
2994 {
2995 	struct s2io_nic *nic = fifo_data->nic;
2996 	struct tx_curr_get_info get_info, put_info;
2997 	struct sk_buff *skb = NULL;
2998 	struct TxD *txdlp;
2999 	int pkt_cnt = 0;
3000 	unsigned long flags = 0;
3001 	u8 err_mask;
3002 	struct stat_block *stats = nic->mac_control.stats_info;
3003 	struct swStat *swstats = &stats->sw_stat;
3004 
3005 	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3006 		return;
3007 
3008 	get_info = fifo_data->tx_curr_get_info;
3009 	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3010 	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3011 	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3012 	       (get_info.offset != put_info.offset) &&
3013 	       (txdlp->Host_Control)) {
3014 		/* Check for TxD errors */
3015 		if (txdlp->Control_1 & TXD_T_CODE) {
3016 			unsigned long long err;
3017 			err = txdlp->Control_1 & TXD_T_CODE;
3018 			if (err & 0x1) {
3019 				swstats->parity_err_cnt++;
3020 			}
3021 
3022 			/* update t_code statistics */
3023 			err_mask = err >> 48;
3024 			switch (err_mask) {
3025 			case 2:
3026 				swstats->tx_buf_abort_cnt++;
3027 				break;
3028 
3029 			case 3:
3030 				swstats->tx_desc_abort_cnt++;
3031 				break;
3032 
3033 			case 7:
3034 				swstats->tx_parity_err_cnt++;
3035 				break;
3036 
3037 			case 10:
3038 				swstats->tx_link_loss_cnt++;
3039 				break;
3040 
3041 			case 15:
3042 				swstats->tx_list_proc_err_cnt++;
3043 				break;
3044 			}
3045 		}
3046 
3047 		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3048 		if (skb == NULL) {
3049 			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3050 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3051 				  __func__);
3052 			return;
3053 		}
3054 		pkt_cnt++;
3055 
3056 		/* Updating the statistics block */
3057 		swstats->mem_freed += skb->truesize;
3058 		dev_kfree_skb_irq(skb);
3059 
3060 		get_info.offset++;
3061 		if (get_info.offset == get_info.fifo_len + 1)
3062 			get_info.offset = 0;
3063 		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3064 		fifo_data->tx_curr_get_info.offset = get_info.offset;
3065 	}
3066 
3067 	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3068 
3069 	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3070 }
3071 
3072 /**
 *  s2io_mdio_write - Function to write to the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @value    : data value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to write values to the MDIO registers.
 *  Return Value:
 *  NONE.
3081  */
3082 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3083 			    struct net_device *dev)
3084 {
3085 	u64 val64;
3086 	struct s2io_nic *sp = netdev_priv(dev);
3087 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3088 
3089 	/* address transaction */
3090 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3091 		MDIO_MMD_DEV_ADDR(mmd_type) |
3092 		MDIO_MMS_PRT_ADDR(0x0);
3093 	writeq(val64, &bar0->mdio_control);
3094 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3095 	writeq(val64, &bar0->mdio_control);
3096 	udelay(100);
3097 
3098 	/* Data transaction */
3099 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3100 		MDIO_MMD_DEV_ADDR(mmd_type) |
3101 		MDIO_MMS_PRT_ADDR(0x0) |
3102 		MDIO_MDIO_DATA(value) |
3103 		MDIO_OP(MDIO_OP_WRITE_TRANS);
3104 	writeq(val64, &bar0->mdio_control);
3105 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3106 	writeq(val64, &bar0->mdio_control);
3107 	udelay(100);
3108 
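	/* A final read transaction is issued after the write; its
	 * result is not consumed here (apparently a read-back of the
	 * value just written).
	 */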
3109 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3110 		MDIO_MMD_DEV_ADDR(mmd_type) |
3111 		MDIO_MMS_PRT_ADDR(0x0) |
3112 		MDIO_OP(MDIO_OP_READ_TRANS);
3113 	writeq(val64, &bar0->mdio_control);
3114 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3115 	writeq(val64, &bar0->mdio_control);
3116 	udelay(100);
3117 }
3118 
3119 /**
 *  s2io_mdio_read - Function to read from the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers.
 *  Return Value:
 *  The 16-bit value read from the register.
3127  */
3128 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3129 {
3130 	u64 val64 = 0x0;
3131 	u64 rval64 = 0x0;
3132 	struct s2io_nic *sp = netdev_priv(dev);
3133 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3134 
3135 	/* address transaction */
3136 	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3137 			 | MDIO_MMD_DEV_ADDR(mmd_type)
3138 			 | MDIO_MMS_PRT_ADDR(0x0));
3139 	writeq(val64, &bar0->mdio_control);
3140 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3141 	writeq(val64, &bar0->mdio_control);
3142 	udelay(100);
3143 
3144 	/* Data transaction */
3145 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3146 		MDIO_MMD_DEV_ADDR(mmd_type) |
3147 		MDIO_MMS_PRT_ADDR(0x0) |
3148 		MDIO_OP(MDIO_OP_READ_TRANS);
3149 	writeq(val64, &bar0->mdio_control);
3150 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3151 	writeq(val64, &bar0->mdio_control);
3152 	udelay(100);
3153 
3154 	/* Read the value from regs */
3155 	rval64 = readq(&bar0->mdio_control);
3156 	rval64 = rval64 & 0xFFFF0000;
3157 	rval64 = rval64 >> 16;
3158 	return rval64;
3159 }
3160 
3161 /**
 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter      : counter value to be updated
 *  @regs_stat    : packed 2-bit status fields for the xpak counters
 *  @index        : index of this counter's field within @regs_stat
 *  @flag         : flag to indicate the status
 *  @type         : counter type
 *  Description:
 *  This function is to check the status of the xpak counters value.
 *  Return Value:
 *  NONE.
3169  */
3170 
static void s2io_chk_xpak_counter(u64 *counter, u64 *regs_stat, u32 index,
				  u16 flag, u16 type)
3173 {
3174 	u64 mask = 0x3;
3175 	u64 val64;
3176 	int i;
3177 	for (i = 0; i < index; i++)
3178 		mask = mask << 0x2;
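	/* Each XPAK counter owns a 2-bit saturating field in *regs_stat;
	 * mask now selects this counter's field.
	 */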
3179 
3180 	if (flag > 0) {
3181 		*counter = *counter + 1;
3182 		val64 = *regs_stat & mask;
3183 		val64 = val64 >> (index * 0x2);
3184 		val64 = val64 + 1;
3185 		if (val64 == 3) {
3186 			switch (type) {
3187 			case 1:
3188 				DBG_PRINT(ERR_DBG,
3189 					  "Take Xframe NIC out of service.\n");
3190 				DBG_PRINT(ERR_DBG,
3191 "Excessive temperatures may result in premature transceiver failure.\n");
3192 				break;
3193 			case 2:
3194 				DBG_PRINT(ERR_DBG,
3195 					  "Take Xframe NIC out of service.\n");
3196 				DBG_PRINT(ERR_DBG,
3197 "Excessive bias currents may indicate imminent laser diode failure.\n");
3198 				break;
3199 			case 3:
3200 				DBG_PRINT(ERR_DBG,
3201 					  "Take Xframe NIC out of service.\n");
3202 				DBG_PRINT(ERR_DBG,
3203 "Excessive laser output power may saturate far-end receiver.\n");
3204 				break;
3205 			default:
3206 				DBG_PRINT(ERR_DBG,
3207 					  "Incorrect XPAK Alarm type\n");
3208 			}
3209 			val64 = 0x0;
3210 		}
3211 		val64 = val64 << (index * 0x2);
3212 		*regs_stat = (*regs_stat & (~mask)) | (val64);
3213 
3214 	} else {
3215 		*regs_stat = *regs_stat & (~mask);
3216 	}
3217 }
3218 
3219 /**
3220  *  s2io_updt_xpak_counter - Function to update the xpak counters
3221  *  @dev         : pointer to net_device struct
3222  *  Description:
 *  This function is to update the status of the xpak counters value.
 *  Return Value:
 *  NONE.
3225  */
3226 static void s2io_updt_xpak_counter(struct net_device *dev)
3227 {
3228 	u16 flag  = 0x0;
3229 	u16 type  = 0x0;
3230 	u16 val16 = 0x0;
3231 	u64 val64 = 0x0;
3232 	u64 addr  = 0x0;
3233 
3234 	struct s2io_nic *sp = netdev_priv(dev);
3235 	struct stat_block *stats = sp->mac_control.stats_info;
3236 	struct xpakStat *xstats = &stats->xpak_stat;
3237 
3238 	/* Check the communication with the MDIO slave */
3239 	addr = MDIO_CTRL1;
3240 	val64 = 0x0;
3241 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3242 	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3243 		DBG_PRINT(ERR_DBG,
3244 			  "ERR: MDIO slave access failed - Returned %llx\n",
3245 			  (unsigned long long)val64);
3246 		return;
3247 	}
3248 
3249 	/* Check for the expected value of control reg 1 */
3250 	if (val64 != MDIO_CTRL1_SPEED10G) {
3251 		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3252 			  "Returned: %llx- Expected: 0x%x\n",
3253 			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3254 		return;
3255 	}
3256 
	/* Loading the DOM register via the MDIO interface */
3258 	addr = 0xA100;
3259 	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3260 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3261 
3262 	/* Reading the Alarm flags */
3263 	addr = 0xA070;
3264 	val64 = 0x0;
3265 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3266 
3267 	flag = CHECKBIT(val64, 0x7);
3268 	type = 1;
3269 	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3270 			      &xstats->xpak_regs_stat,
3271 			      0x0, flag, type);
3272 
3273 	if (CHECKBIT(val64, 0x6))
3274 		xstats->alarm_transceiver_temp_low++;
3275 
3276 	flag = CHECKBIT(val64, 0x3);
3277 	type = 2;
3278 	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3279 			      &xstats->xpak_regs_stat,
3280 			      0x2, flag, type);
3281 
3282 	if (CHECKBIT(val64, 0x2))
3283 		xstats->alarm_laser_bias_current_low++;
3284 
3285 	flag = CHECKBIT(val64, 0x1);
3286 	type = 3;
3287 	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3288 			      &xstats->xpak_regs_stat,
3289 			      0x4, flag, type);
3290 
3291 	if (CHECKBIT(val64, 0x0))
3292 		xstats->alarm_laser_output_power_low++;
3293 
3294 	/* Reading the Warning flags */
3295 	addr = 0xA074;
3296 	val64 = 0x0;
3297 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3298 
3299 	if (CHECKBIT(val64, 0x7))
3300 		xstats->warn_transceiver_temp_high++;
3301 
3302 	if (CHECKBIT(val64, 0x6))
3303 		xstats->warn_transceiver_temp_low++;
3304 
3305 	if (CHECKBIT(val64, 0x3))
3306 		xstats->warn_laser_bias_current_high++;
3307 
3308 	if (CHECKBIT(val64, 0x2))
3309 		xstats->warn_laser_bias_current_low++;
3310 
3311 	if (CHECKBIT(val64, 0x1))
3312 		xstats->warn_laser_output_power_high++;
3313 
3314 	if (CHECKBIT(val64, 0x0))
3315 		xstats->warn_laser_output_power_low++;
3316 }
3317 
3318 /**
3319  *  wait_for_cmd_complete - waits for a command to complete.
 *  @addr : address of the register to poll.
 *  @busy_bit : bit in the register that indicates the command is busy.
 *  @bit_state : S2IO_BIT_RESET to wait for @busy_bit to clear,
 *  S2IO_BIT_SET to wait for it to be set.
3322  *  Description: Function that waits for a command to Write into RMAC
3323  *  ADDR DATA registers to be completed and returns either success or
3324  *  error depending on whether the command was complete or not.
3325  *  Return value:
3326  *   SUCCESS on success and FAILURE on failure.
3327  */
3328 
3329 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3330 				 int bit_state)
3331 {
3332 	int ret = FAILURE, cnt = 0, delay = 1;
3333 	u64 val64;
3334 
3335 	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3336 		return FAILURE;
3337 
3338 	do {
3339 		val64 = readq(addr);
3340 		if (bit_state == S2IO_BIT_RESET) {
3341 			if (!(val64 & busy_bit)) {
3342 				ret = SUCCESS;
3343 				break;
3344 			}
3345 		} else {
3346 			if (val64 & busy_bit) {
3347 				ret = SUCCESS;
3348 				break;
3349 			}
3350 		}
3351 
3352 		if (in_interrupt())
3353 			mdelay(delay);
3354 		else
3355 			msleep(delay);
3356 
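		/* Poll every 1 ms for the first 10 iterations, then back
		 * off to 50 ms; the worst-case total wait is about 510 ms.
		 */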
3357 		if (++cnt >= 10)
3358 			delay = 50;
3359 	} while (cnt < 20);
3360 	return ret;
3361 }

/**
3363  * check_pci_device_id - Checks if the device id is supported
3364  * @id : device id
3365  * Description: Function to check if the pci device id is supported by driver.
3366  * Return value: Actual device id if supported else PCI_ANY_ID
3367  */
3368 static u16 check_pci_device_id(u16 id)
3369 {
3370 	switch (id) {
3371 	case PCI_DEVICE_ID_HERC_WIN:
3372 	case PCI_DEVICE_ID_HERC_UNI:
3373 		return XFRAME_II_DEVICE;
3374 	case PCI_DEVICE_ID_S2IO_UNI:
3375 	case PCI_DEVICE_ID_S2IO_WIN:
3376 		return XFRAME_I_DEVICE;
3377 	default:
3378 		return PCI_ANY_ID;
3379 	}
3380 }
3381 
3382 /**
3383  *  s2io_reset - Resets the card.
3384  *  @sp : private member of the device structure.
3385  *  Description: Function to Reset the card. This function then also
3386  *  restores the previously saved PCI configuration space registers as
3387  *  the card reset also resets the configuration space.
3388  *  Return value:
3389  *  void.
3390  */
3391 
3392 static void s2io_reset(struct s2io_nic *sp)
3393 {
3394 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3395 	u64 val64;
3396 	u16 subid, pci_cmd;
3397 	int i;
3398 	u16 val16;
3399 	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3400 	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3401 	struct stat_block *stats;
3402 	struct swStat *swstats;
3403 
3404 	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3405 		  __func__, pci_name(sp->pdev));
3406 
	/* Back up the PCI-X CMD reg; don't want to lose MMRBC, OST settings */
3408 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3409 
3410 	val64 = SW_RESET_ALL;
3411 	writeq(val64, &bar0->sw_reset);
3412 	if (strstr(sp->product_name, "CX4"))
3413 		msleep(750);
3414 	msleep(250);
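	/* Wait for PCI config space to become accessible again; reading
	 * back a recognised device ID indicates the reset has completed.
	 */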
3415 	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3416 
3417 		/* Restore the PCI state saved during initialization. */
3418 		pci_restore_state(sp->pdev);
3419 		pci_save_state(sp->pdev);
3420 		pci_read_config_word(sp->pdev, 0x2, &val16);
3421 		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3422 			break;
3423 		msleep(200);
3424 	}
3425 
3426 	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3427 		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3428 
3429 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3430 
3431 	s2io_init_pci(sp);
3432 
3433 	/* Set swapper to enable I/O register access */
3434 	s2io_set_swapper(sp);
3435 
3436 	/* restore mac_addr entries */
3437 	do_s2io_restore_unicast_mc(sp);
3438 
3439 	/* Restore the MSIX table entries from local variables */
3440 	restore_xmsi_data(sp);
3441 
3442 	/* Clear certain PCI/PCI-X fields after reset */
3443 	if (sp->device_type == XFRAME_II_DEVICE) {
3444 		/* Clear "detected parity error" bit */
3445 		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3446 
3447 		/* Clearing PCIX Ecc status register */
3448 		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3449 
3450 		/* Clearing PCI_STATUS error reflected here */
3451 		writeq(s2BIT(62), &bar0->txpic_int_reg);
3452 	}
3453 
3454 	/* Reset device statistics maintained by OS */
3455 	memset(&sp->stats, 0, sizeof(struct net_device_stats));
3456 
3457 	stats = sp->mac_control.stats_info;
3458 	swstats = &stats->sw_stat;
3459 
3460 	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3461 	up_cnt = swstats->link_up_cnt;
3462 	down_cnt = swstats->link_down_cnt;
3463 	up_time = swstats->link_up_time;
3464 	down_time = swstats->link_down_time;
3465 	reset_cnt = swstats->soft_reset_cnt;
3466 	mem_alloc_cnt = swstats->mem_allocated;
3467 	mem_free_cnt = swstats->mem_freed;
3468 	watchdog_cnt = swstats->watchdog_timer_cnt;
3469 
3470 	memset(stats, 0, sizeof(struct stat_block));
3471 
3472 	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3473 	swstats->link_up_cnt = up_cnt;
3474 	swstats->link_down_cnt = down_cnt;
3475 	swstats->link_up_time = up_time;
3476 	swstats->link_down_time = down_time;
3477 	swstats->soft_reset_cnt = reset_cnt;
3478 	swstats->mem_allocated = mem_alloc_cnt;
3479 	swstats->mem_freed = mem_free_cnt;
3480 	swstats->watchdog_timer_cnt = watchdog_cnt;
3481 
3482 	/* SXE-002: Configure link and activity LED to turn it off */
3483 	subid = sp->pdev->subsystem_device;
3484 	if (((subid & 0xFF) >= 0x07) &&
3485 	    (sp->device_type == XFRAME_I_DEVICE)) {
3486 		val64 = readq(&bar0->gpio_control);
3487 		val64 |= 0x0000800000000000ULL;
3488 		writeq(val64, &bar0->gpio_control);
3489 		val64 = 0x0411040400000000ULL;
3490 		writeq(val64, (void __iomem *)bar0 + 0x2700);
3491 	}
3492 
3493 	/*
3494 	 * Clear spurious ECC interrupts that would have occurred on
3495 	 * XFRAME II cards after reset.
3496 	 */
3497 	if (sp->device_type == XFRAME_II_DEVICE) {
3498 		val64 = readq(&bar0->pcc_err_reg);
3499 		writeq(val64, &bar0->pcc_err_reg);
3500 	}
3501 
3502 	sp->device_enabled_once = false;
3503 }
3504 
3505 /**
 *  s2io_set_swapper - to set the swapper control on the card
3507  *  @sp : private member of the device structure,
3508  *  pointer to the s2io_nic structure.
3509  *  Description: Function to set the swapper control on the card
3510  *  correctly depending on the 'endianness' of the system.
3511  *  Return value:
3512  *  SUCCESS on success and FAILURE on failure.
3513  */
3514 
3515 static int s2io_set_swapper(struct s2io_nic *sp)
3516 {
3517 	struct net_device *dev = sp->dev;
3518 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3519 	u64 val64, valt, valr;
3520 
3521 	/*
3522 	 * Set proper endian settings and verify the same by reading
3523 	 * the PIF Feed-back register.
3524 	 */
3525 
3526 	val64 = readq(&bar0->pif_rd_swapper_fb);
3527 	if (val64 != 0x0123456789ABCDEFULL) {
3528 		int i = 0;
3529 		static const u64 value[] = {
3530 			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
3531 			0x8100008181000081ULL,	/* FE=1, SE=0 */
3532 			0x4200004242000042ULL,	/* FE=0, SE=1 */
3533 			0			/* FE=0, SE=0 */
3534 		};
3535 
3536 		while (i < 4) {
3537 			writeq(value[i], &bar0->swapper_ctrl);
3538 			val64 = readq(&bar0->pif_rd_swapper_fb);
3539 			if (val64 == 0x0123456789ABCDEFULL)
3540 				break;
3541 			i++;
3542 		}
3543 		if (i == 4) {
3544 			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3545 				  "feedback read %llx\n",
3546 				  dev->name, (unsigned long long)val64);
3547 			return FAILURE;
3548 		}
3549 		valr = value[i];
3550 	} else {
3551 		valr = readq(&bar0->swapper_ctrl);
3552 	}
3553 
3554 	valt = 0x0123456789ABCDEFULL;
3555 	writeq(valt, &bar0->xmsi_address);
3556 	val64 = readq(&bar0->xmsi_address);
3557 
3558 	if (val64 != valt) {
3559 		int i = 0;
3560 		static const u64 value[] = {
3561 			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
3562 			0x0081810000818100ULL,	/* FE=1, SE=0 */
3563 			0x0042420000424200ULL,	/* FE=0, SE=1 */
3564 			0			/* FE=0, SE=0 */
3565 		};
3566 
3567 		while (i < 4) {
3568 			writeq((value[i] | valr), &bar0->swapper_ctrl);
3569 			writeq(valt, &bar0->xmsi_address);
3570 			val64 = readq(&bar0->xmsi_address);
3571 			if (val64 == valt)
3572 				break;
3573 			i++;
3574 		}
3575 		if (i == 4) {
3576 			unsigned long long x = val64;
3577 			DBG_PRINT(ERR_DBG,
3578 				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
3579 			return FAILURE;
3580 		}
3581 	}
3582 	val64 = readq(&bar0->swapper_ctrl);
3583 	val64 &= 0xFFFF000000000000ULL;
3584 
3585 #ifdef __BIG_ENDIAN
	/*
	 * The device is set to a big endian format by default, so a
	 * big endian driver need not set anything further.
	 */
3590 	val64 |= (SWAPPER_CTRL_TXP_FE |
3591 		  SWAPPER_CTRL_TXP_SE |
3592 		  SWAPPER_CTRL_TXD_R_FE |
3593 		  SWAPPER_CTRL_TXD_W_FE |
3594 		  SWAPPER_CTRL_TXF_R_FE |
3595 		  SWAPPER_CTRL_RXD_R_FE |
3596 		  SWAPPER_CTRL_RXD_W_FE |
3597 		  SWAPPER_CTRL_RXF_W_FE |
3598 		  SWAPPER_CTRL_XMSI_FE |
3599 		  SWAPPER_CTRL_STATS_FE |
3600 		  SWAPPER_CTRL_STATS_SE);
3601 	if (sp->config.intr_type == INTA)
3602 		val64 |= SWAPPER_CTRL_XMSI_SE;
3603 	writeq(val64, &bar0->swapper_ctrl);
3604 #else
3605 	/*
3606 	 * Initially we enable all bits to make it accessible by the
3607 	 * driver, then we selectively enable only those bits that
3608 	 * we want to set.
3609 	 */
3610 	val64 |= (SWAPPER_CTRL_TXP_FE |
3611 		  SWAPPER_CTRL_TXP_SE |
3612 		  SWAPPER_CTRL_TXD_R_FE |
3613 		  SWAPPER_CTRL_TXD_R_SE |
3614 		  SWAPPER_CTRL_TXD_W_FE |
3615 		  SWAPPER_CTRL_TXD_W_SE |
3616 		  SWAPPER_CTRL_TXF_R_FE |
3617 		  SWAPPER_CTRL_RXD_R_FE |
3618 		  SWAPPER_CTRL_RXD_R_SE |
3619 		  SWAPPER_CTRL_RXD_W_FE |
3620 		  SWAPPER_CTRL_RXD_W_SE |
3621 		  SWAPPER_CTRL_RXF_W_FE |
3622 		  SWAPPER_CTRL_XMSI_FE |
3623 		  SWAPPER_CTRL_STATS_FE |
3624 		  SWAPPER_CTRL_STATS_SE);
3625 	if (sp->config.intr_type == INTA)
3626 		val64 |= SWAPPER_CTRL_XMSI_SE;
3627 	writeq(val64, &bar0->swapper_ctrl);
3628 #endif
3629 	val64 = readq(&bar0->swapper_ctrl);
3630 
3631 	/*
3632 	 * Verifying if endian settings are accurate by reading a
3633 	 * feedback register.
3634 	 */
3635 	val64 = readq(&bar0->pif_rd_swapper_fb);
3636 	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect; this calls for another look. */
3638 		DBG_PRINT(ERR_DBG,
3639 			  "%s: Endian settings are wrong, feedback read %llx\n",
3640 			  dev->name, (unsigned long long)val64);
3641 		return FAILURE;
3642 	}
3643 
3644 	return SUCCESS;
3645 }
3646 
3647 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3648 {
3649 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3650 	u64 val64;
3651 	int ret = 0, cnt = 0;
3652 
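	/* s2BIT(15) in xmsi_access is the busy/strobe bit; poll until the
	 * hardware clears it or give up after ~5 ms.
	 */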
3653 	do {
3654 		val64 = readq(&bar0->xmsi_access);
3655 		if (!(val64 & s2BIT(15)))
3656 			break;
3657 		mdelay(1);
3658 		cnt++;
3659 	} while (cnt < 5);
3660 	if (cnt == 5) {
3661 		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3662 		ret = 1;
3663 	}
3664 
3665 	return ret;
3666 }
3667 
3668 static void restore_xmsi_data(struct s2io_nic *nic)
3669 {
3670 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3671 	u64 val64;
3672 	int i, msix_index;
3673 
3674 	if (nic->device_type == XFRAME_I_DEVICE)
3675 		return;
3676 
3677 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
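		/* Vector 0 is the alarm vector; ring vectors occupy every
		 * 8th slot of the MSI-X table starting at index 1.
		 */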
3678 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3679 		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3680 		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3681 		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3682 		writeq(val64, &bar0->xmsi_access);
3683 		if (wait_for_msix_trans(nic, msix_index))
3684 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3685 				  __func__, msix_index);
3686 	}
3687 }
3688 
3689 static void store_xmsi_data(struct s2io_nic *nic)
3690 {
3691 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3692 	u64 val64, addr, data;
3693 	int i, msix_index;
3694 
3695 	if (nic->device_type == XFRAME_I_DEVICE)
3696 		return;
3697 
	/* Store the MSI-X address and data pairs */
3699 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3700 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3701 		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3702 		writeq(val64, &bar0->xmsi_access);
3703 		if (wait_for_msix_trans(nic, msix_index)) {
3704 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3705 				  __func__, msix_index);
3706 			continue;
3707 		}
3708 		addr = readq(&bar0->xmsi_address);
3709 		data = readq(&bar0->xmsi_data);
3710 		if (addr && data) {
3711 			nic->msix_info[i].addr = addr;
3712 			nic->msix_info[i].data = data;
3713 		}
3714 	}
3715 }
3716 
3717 static int s2io_enable_msi_x(struct s2io_nic *nic)
3718 {
3719 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3720 	u64 rx_mat;
3721 	u16 msi_control; /* Temp variable */
3722 	int ret, i, j, msix_indx = 1;
3723 	int size;
3724 	struct stat_block *stats = nic->mac_control.stats_info;
3725 	struct swStat *swstats = &stats->sw_stat;
3726 
3727 	size = nic->num_entries * sizeof(struct msix_entry);
3728 	nic->entries = kzalloc(size, GFP_KERNEL);
3729 	if (!nic->entries) {
3730 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3731 			  __func__);
3732 		swstats->mem_alloc_fail_cnt++;
3733 		return -ENOMEM;
3734 	}
3735 	swstats->mem_allocated += size;
3736 
3737 	size = nic->num_entries * sizeof(struct s2io_msix_entry);
3738 	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3739 	if (!nic->s2io_entries) {
3740 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3741 			  __func__);
3742 		swstats->mem_alloc_fail_cnt++;
3743 		kfree(nic->entries);
3744 		swstats->mem_freed
3745 			+= (nic->num_entries * sizeof(struct msix_entry));
3746 		return -ENOMEM;
3747 	}
3748 	swstats->mem_allocated += size;
3749 
3750 	nic->entries[0].entry = 0;
3751 	nic->s2io_entries[0].entry = 0;
3752 	nic->s2io_entries[0].in_use = MSIX_FLG;
3753 	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3754 	nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3755 
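	/* Remaining entries serve the Rx rings, spaced 8 apart to match
	 * the hardware's MSI-X table layout.
	 */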
3756 	for (i = 1; i < nic->num_entries; i++) {
3757 		nic->entries[i].entry = ((i - 1) * 8) + 1;
3758 		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3759 		nic->s2io_entries[i].arg = NULL;
3760 		nic->s2io_entries[i].in_use = 0;
3761 	}
3762 
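	/* Steer each Rx ring's interrupt to its MSI-X vector via rx_mat */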
3763 	rx_mat = readq(&bar0->rx_mat);
3764 	for (j = 0; j < nic->config.rx_ring_num; j++) {
3765 		rx_mat |= RX_MAT_SET(j, msix_indx);
3766 		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3767 		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3768 		nic->s2io_entries[j+1].in_use = MSIX_FLG;
3769 		msix_indx += 8;
3770 	}
3771 	writeq(rx_mat, &bar0->rx_mat);
3772 	readq(&bar0->rx_mat);
3773 
3774 	ret = pci_enable_msix_range(nic->pdev, nic->entries,
3775 				    nic->num_entries, nic->num_entries);
3776 	/* We fail init if error or we get less vectors than min required */
3777 	if (ret < 0) {
3778 		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3779 		kfree(nic->entries);
3780 		swstats->mem_freed += nic->num_entries *
3781 			sizeof(struct msix_entry);
3782 		kfree(nic->s2io_entries);
3783 		swstats->mem_freed += nic->num_entries *
3784 			sizeof(struct s2io_msix_entry);
3785 		nic->entries = NULL;
3786 		nic->s2io_entries = NULL;
3787 		return -ENOMEM;
3788 	}
3789 
3790 	/*
3791 	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3792 	 * in the herc NIC. (Temp change, needs to be removed later)
3793 	 */
3794 	pci_read_config_word(nic->pdev, 0x42, &msi_control);
3795 	msi_control |= 0x1; /* Enable MSI */
3796 	pci_write_config_word(nic->pdev, 0x42, msi_control);
3797 
3798 	return 0;
3799 }
3800 
3801 /* Handle software interrupt used during MSI(X) test */
3802 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3803 {
3804 	struct s2io_nic *sp = dev_id;
3805 
3806 	sp->msi_detected = 1;
3807 	wake_up(&sp->msi_wait);
3808 
3809 	return IRQ_HANDLED;
3810 }
3811 
/* Test the interrupt path by forcing a software IRQ */
3813 static int s2io_test_msi(struct s2io_nic *sp)
3814 {
3815 	struct pci_dev *pdev = sp->pdev;
3816 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3817 	int err;
3818 	u64 val64, saved64;
3819 
3820 	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3821 			  sp->name, sp);
3822 	if (err) {
3823 		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3824 			  sp->dev->name, pci_name(pdev), pdev->irq);
3825 		return err;
3826 	}
3827 
3828 	init_waitqueue_head(&sp->msi_wait);
3829 	sp->msi_detected = 0;
3830 
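	/* Program a one-shot scheduled interrupt routed to MSI-X vector 1
	 * and wait up to HZ/10 for the test handler to observe it.
	 */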
3831 	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3832 	val64 |= SCHED_INT_CTRL_ONE_SHOT;
3833 	val64 |= SCHED_INT_CTRL_TIMER_EN;
3834 	val64 |= SCHED_INT_CTRL_INT2MSI(1);
3835 	writeq(val64, &bar0->scheduled_int_ctrl);
3836 
3837 	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3838 
3839 	if (!sp->msi_detected) {
3840 		/* MSI(X) test failed, go back to INTx mode */
3841 		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3842 			  "using MSI(X) during test\n",
3843 			  sp->dev->name, pci_name(pdev));
3844 
3845 		err = -EOPNOTSUPP;
3846 	}
3847 
3848 	free_irq(sp->entries[1].vector, sp);
3849 
3850 	writeq(saved64, &bar0->scheduled_int_ctrl);
3851 
3852 	return err;
3853 }
3854 
3855 static void remove_msix_isr(struct s2io_nic *sp)
3856 {
3857 	int i;
3858 	u16 msi_control;
3859 
3860 	for (i = 0; i < sp->num_entries; i++) {
3861 		if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3862 			int vector = sp->entries[i].vector;
3863 			void *arg = sp->s2io_entries[i].arg;
3864 			free_irq(vector, arg);
3865 		}
3866 	}
3867 
3868 	kfree(sp->entries);
3869 	kfree(sp->s2io_entries);
3870 	sp->entries = NULL;
3871 	sp->s2io_entries = NULL;
3872 
3873 	pci_read_config_word(sp->pdev, 0x42, &msi_control);
3874 	msi_control &= 0xFFFE; /* Disable MSI */
3875 	pci_write_config_word(sp->pdev, 0x42, msi_control);
3876 
3877 	pci_disable_msix(sp->pdev);
3878 }
3879 
3880 static void remove_inta_isr(struct s2io_nic *sp)
3881 {
3882 	free_irq(sp->pdev->irq, sp->dev);
3883 }
3884 
3885 /* ********************************************************* *
3886  * Functions defined below concern the OS part of the driver *
3887  * ********************************************************* */
3888 
3889 /**
3890  *  s2io_open - open entry point of the driver
3891  *  @dev : pointer to the device structure.
3892  *  Description:
 *  This function is the open entry point of the driver. It mainly
 *  allocates Rx buffers, inserts them into the buffer descriptors and
 *  then enables the Rx part of the NIC.
3896  *  Return value:
3897  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3898  *   file on failure.
3899  */
3900 
3901 static int s2io_open(struct net_device *dev)
3902 {
3903 	struct s2io_nic *sp = netdev_priv(dev);
3904 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3905 	int err = 0;
3906 
3907 	/*
3908 	 * Make sure you have link off by default every time
3909 	 * Nic is initialized
3910 	 */
3911 	netif_carrier_off(dev);
3912 	sp->last_link_state = 0;
3913 
3914 	/* Initialize H/W and enable interrupts */
3915 	err = s2io_card_up(sp);
3916 	if (err) {
3917 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3918 			  dev->name);
3919 		goto hw_init_failed;
3920 	}
3921 
3922 	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3923 		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3924 		s2io_card_down(sp);
3925 		err = -ENODEV;
3926 		goto hw_init_failed;
3927 	}
3928 	s2io_start_all_tx_queue(sp);
3929 	return 0;
3930 
3931 hw_init_failed:
3932 	if (sp->config.intr_type == MSI_X) {
3933 		if (sp->entries) {
3934 			kfree(sp->entries);
3935 			swstats->mem_freed += sp->num_entries *
3936 				sizeof(struct msix_entry);
3937 		}
3938 		if (sp->s2io_entries) {
3939 			kfree(sp->s2io_entries);
3940 			swstats->mem_freed += sp->num_entries *
3941 				sizeof(struct s2io_msix_entry);
3942 		}
3943 	}
3944 	return err;
3945 }
3946 
3947 /**
3948  *  s2io_close -close entry point of the driver
3949  *  @dev : device pointer.
3950  *  Description:
 *  This is the stop entry point of the driver. It needs to undo exactly
 *  whatever was done by the open entry point, thus it's usually referred
 *  to as the close function. Among other things this function mainly
 *  stops the Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3955  *  Return value:
3956  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3957  *  file on failure.
3958  */
3959 
3960 static int s2io_close(struct net_device *dev)
3961 {
3962 	struct s2io_nic *sp = netdev_priv(dev);
3963 	struct config_param *config = &sp->config;
3964 	u64 tmp64;
3965 	int offset;
3966 
	/*
	 * Return if the device is already closed.
	 * Can happen when s2io_card_up failed in change_mtu.
	 */
3970 	if (!is_s2io_card_up(sp))
3971 		return 0;
3972 
3973 	s2io_stop_all_tx_queue(sp);
3974 	/* delete all populated mac entries */
3975 	for (offset = 1; offset < config->max_mc_addr; offset++) {
3976 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
3977 		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3978 			do_s2io_delete_unicast_mc(sp, tmp64);
3979 	}
3980 
3981 	s2io_card_down(sp);
3982 
3983 	return 0;
3984 }
3985 
3986 /**
 *  s2io_xmit - Tx entry point of the driver
3988  *  @skb : the socket buffer containing the Tx data.
3989  *  @dev : device pointer.
3990  *  Description :
3991  *  This function is the Tx entry point of the driver. S2IO NIC supports
3992  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device cannot queue the pkt, just the trans_start
 *  variable will not be updated.
 *  Return value:
 *  NETDEV_TX_OK on success; NETDEV_TX_BUSY when the queue is stopped.
3997  */
3998 
3999 static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4000 {
4001 	struct s2io_nic *sp = netdev_priv(dev);
4002 	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4003 	register u64 val64;
4004 	struct TxD *txdp;
4005 	struct TxFIFO_element __iomem *tx_fifo;
4006 	unsigned long flags = 0;
4007 	u16 vlan_tag = 0;
4008 	struct fifo_info *fifo = NULL;
4009 	int offload_type;
4010 	int enable_per_list_interrupt = 0;
4011 	struct config_param *config = &sp->config;
4012 	struct mac_info *mac_control = &sp->mac_control;
4013 	struct stat_block *stats = mac_control->stats_info;
4014 	struct swStat *swstats = &stats->sw_stat;
4015 
4016 	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4017 
4018 	if (unlikely(skb->len <= 0)) {
4019 		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4020 		dev_kfree_skb_any(skb);
4021 		return NETDEV_TX_OK;
4022 	}
4023 
4024 	if (!is_s2io_card_up(sp)) {
4025 		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4026 			  dev->name);
4027 		dev_kfree_skb_any(skb);
4028 		return NETDEV_TX_OK;
4029 	}
4030 
4031 	queue = 0;
4032 	if (skb_vlan_tag_present(skb))
4033 		vlan_tag = skb_vlan_tag_get(skb);
4034 	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4035 		if (skb->protocol == htons(ETH_P_IP)) {
4036 			struct iphdr *ip;
4037 			struct tcphdr *th;
4038 			ip = ip_hdr(skb);
4039 
4040 			if (!ip_is_fragment(ip)) {
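				/* TCP and UDP both begin with 16-bit source
				 * and destination ports, so the tcphdr cast
				 * is valid for the UDP case below too.
				 */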
4041 				th = (struct tcphdr *)(((unsigned char *)ip) +
4042 						       ip->ihl*4);
4043 
4044 				if (ip->protocol == IPPROTO_TCP) {
4045 					queue_len = sp->total_tcp_fifos;
4046 					queue = (ntohs(th->source) +
4047 						 ntohs(th->dest)) &
4048 						sp->fifo_selector[queue_len - 1];
4049 					if (queue >= queue_len)
4050 						queue = queue_len - 1;
4051 				} else if (ip->protocol == IPPROTO_UDP) {
4052 					queue_len = sp->total_udp_fifos;
4053 					queue = (ntohs(th->source) +
4054 						 ntohs(th->dest)) &
4055 						sp->fifo_selector[queue_len - 1];
4056 					if (queue >= queue_len)
4057 						queue = queue_len - 1;
4058 					queue += sp->udp_fifo_idx;
4059 					if (skb->len > 1024)
4060 						enable_per_list_interrupt = 1;
4061 				}
4062 			}
4063 		}
4064 	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4065 		/* get fifo number based on skb->priority value */
4066 		queue = config->fifo_mapping
4067 			[skb->priority & (MAX_TX_FIFOS - 1)];
4068 	fifo = &mac_control->fifos[queue];
4069 
4070 	spin_lock_irqsave(&fifo->tx_lock, flags);
4071 
4072 	if (sp->config.multiq) {
4073 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4074 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4075 			return NETDEV_TX_BUSY;
4076 		}
4077 	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4078 		if (netif_queue_stopped(dev)) {
4079 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4080 			return NETDEV_TX_BUSY;
4081 		}
4082 	}
4083 
4084 	put_off = (u16)fifo->tx_curr_put_info.offset;
4085 	get_off = (u16)fifo->tx_curr_get_info.offset;
4086 	txdp = fifo->list_info[put_off].list_virt_addr;
4087 
4088 	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4089 	/* Avoid "put" pointer going beyond "get" pointer */
4090 	if (txdp->Host_Control ||
4091 	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4092 		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4093 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4094 		dev_kfree_skb_any(skb);
4095 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
4096 		return NETDEV_TX_OK;
4097 	}
4098 
4099 	offload_type = s2io_offload_type(skb);
4100 	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4101 		txdp->Control_1 |= TXD_TCP_LSO_EN;
4102 		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4103 	}
4104 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
4105 		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4106 				    TXD_TX_CKO_TCP_EN |
4107 				    TXD_TX_CKO_UDP_EN);
4108 	}
4109 	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4110 	txdp->Control_1 |= TXD_LIST_OWN_XENA;
4111 	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
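	/* For large UDP flows, periodically request a per-list Tx
	 * completion interrupt.
	 */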
4112 	if (enable_per_list_interrupt)
4113 		if (put_off & (queue_len >> 5))
4114 			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4115 	if (vlan_tag) {
4116 		txdp->Control_2 |= TXD_VLAN_ENABLE;
4117 		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4118 	}
4119 
4120 	frg_len = skb_headlen(skb);
4121 	txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4122 					      frg_len, PCI_DMA_TODEVICE);
4123 	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4124 		goto pci_map_failed;
4125 
4126 	txdp->Host_Control = (unsigned long)skb;
4127 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4128 
4129 	frg_cnt = skb_shinfo(skb)->nr_frags;
4130 	/* For fragmented SKB. */
4131 	for (i = 0; i < frg_cnt; i++) {
4132 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4133 		/* A '0' length fragment will be ignored */
4134 		if (!skb_frag_size(frag))
4135 			continue;
4136 		txdp++;
4137 		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4138 							     frag, 0,
4139 							     skb_frag_size(frag),
4140 							     DMA_TO_DEVICE);
4141 		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4142 	}
4143 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4144 
4145 	tx_fifo = mac_control->tx_FIFO_start[queue];
4146 	val64 = fifo->list_info[put_off].list_phy_addr;
4147 	writeq(val64, &tx_fifo->TxDL_Pointer);
4148 
4149 	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4150 		 TX_FIFO_LAST_LIST);
4151 	if (offload_type)
4152 		val64 |= TX_FIFO_SPECIAL_FUNC;
4153 
4154 	writeq(val64, &tx_fifo->List_Control);
4155 
4156 	mmiowb();
4157 
4158 	put_off++;
4159 	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4160 		put_off = 0;
4161 	fifo->tx_curr_put_info.offset = put_off;
4162 
4163 	/* Avoid "put" pointer going beyond "get" pointer */
4164 	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4165 		swstats->fifo_full_cnt++;
4166 		DBG_PRINT(TX_DBG,
4167 			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4168 			  put_off, get_off);
4169 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4170 	}
4171 	swstats->mem_allocated += skb->truesize;
4172 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4173 
4174 	if (sp->config.intr_type == MSI_X)
4175 		tx_intr_handler(fifo);
4176 
4177 	return NETDEV_TX_OK;
4178 
4179 pci_map_failed:
4180 	swstats->pci_map_fail_cnt++;
4181 	s2io_stop_tx_queue(sp, fifo->fifo_no);
4182 	swstats->mem_freed += skb->truesize;
4183 	dev_kfree_skb_any(skb);
4184 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4185 	return NETDEV_TX_OK;
4186 }
4187 
4188 static void
4189 s2io_alarm_handle(struct timer_list *t)
4190 {
4191 	struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
4192 	struct net_device *dev = sp->dev;
4193 
4194 	s2io_handle_errors(dev);
4195 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4196 }
4197 
4198 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4199 {
4200 	struct ring_info *ring = (struct ring_info *)dev_id;
4201 	struct s2io_nic *sp = ring->nic;
4202 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4203 
4204 	if (unlikely(!is_s2io_card_up(sp)))
4205 		return IRQ_HANDLED;
4206 
4207 	if (sp->config.napi) {
4208 		u8 __iomem *addr = NULL;
4209 		u8 val8 = 0;
4210 
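		/* Mask this ring's MSI-X vector (one byte per ring in
		 * xmsi_mask_reg) before scheduling NAPI; the readb flushes
		 * the posted write.
		 */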
4211 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4212 		addr += (7 - ring->ring_no);
4213 		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4214 		writeb(val8, addr);
4215 		val8 = readb(addr);
4216 		napi_schedule(&ring->napi);
4217 	} else {
4218 		rx_intr_handler(ring, 0);
4219 		s2io_chk_rx_buffers(sp, ring);
4220 	}
4221 
4222 	return IRQ_HANDLED;
4223 }
4224 
4225 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4226 {
4227 	int i;
4228 	struct fifo_info *fifos = (struct fifo_info *)dev_id;
4229 	struct s2io_nic *sp = fifos->nic;
4230 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4231 	struct config_param *config  = &sp->config;
4232 	u64 reason;
4233 
4234 	if (unlikely(!is_s2io_card_up(sp)))
4235 		return IRQ_NONE;
4236 
4237 	reason = readq(&bar0->general_int_status);
4238 	if (unlikely(reason == S2IO_MINUS_ONE))
4239 		/* Nothing much can be done. Get out */
4240 		return IRQ_HANDLED;
4241 
4242 	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4243 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4244 
4245 		if (reason & GEN_INTR_TXPIC)
4246 			s2io_txpic_intr_handle(sp);
4247 
4248 		if (reason & GEN_INTR_TXTRAFFIC)
4249 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4250 
4251 		for (i = 0; i < config->tx_fifo_num; i++)
4252 			tx_intr_handler(&fifos[i]);
4253 
4254 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4255 		readl(&bar0->general_int_status);
4256 		return IRQ_HANDLED;
4257 	}
4258 	/* The interrupt was not raised by us */
4259 	return IRQ_NONE;
4260 }
4261 
4262 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4263 {
4264 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4265 	u64 val64;
4266 
4267 	val64 = readq(&bar0->pic_int_status);
4268 	if (val64 & PIC_INT_GPIO) {
4269 		val64 = readq(&bar0->gpio_int_reg);
4270 		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4271 		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is an unstable state, so clear both up/down
			 * interrupts and let the adapter re-evaluate the
			 * link state.
			 */
4276 			val64 |= GPIO_INT_REG_LINK_DOWN;
4277 			val64 |= GPIO_INT_REG_LINK_UP;
4278 			writeq(val64, &bar0->gpio_int_reg);
4279 			val64 = readq(&bar0->gpio_int_mask);
4280 			val64 &= ~(GPIO_INT_MASK_LINK_UP |
4281 				   GPIO_INT_MASK_LINK_DOWN);
4282 			writeq(val64, &bar0->gpio_int_mask);
4283 		} else if (val64 & GPIO_INT_REG_LINK_UP) {
4284 			val64 = readq(&bar0->adapter_status);
4285 			/* Enable Adapter */
4286 			val64 = readq(&bar0->adapter_control);
4287 			val64 |= ADAPTER_CNTL_EN;
4288 			writeq(val64, &bar0->adapter_control);
4289 			val64 |= ADAPTER_LED_ON;
4290 			writeq(val64, &bar0->adapter_control);
4291 			if (!sp->device_enabled_once)
4292 				sp->device_enabled_once = 1;
4293 
4294 			s2io_link(sp, LINK_UP);
4295 			/*
4296 			 * unmask link down interrupt and mask link-up
4297 			 * intr
4298 			 */
4299 			val64 = readq(&bar0->gpio_int_mask);
4300 			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4301 			val64 |= GPIO_INT_MASK_LINK_UP;
4302 			writeq(val64, &bar0->gpio_int_mask);
4303 
4304 		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4305 			val64 = readq(&bar0->adapter_status);
4306 			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
4308 			val64 = readq(&bar0->gpio_int_mask);
4309 			val64 &= ~GPIO_INT_MASK_LINK_UP;
4310 			val64 |= GPIO_INT_MASK_LINK_DOWN;
4311 			writeq(val64, &bar0->gpio_int_mask);
4312 
4313 			/* turn off LED */
4314 			val64 = readq(&bar0->adapter_control);
4315 			val64 = val64 & (~ADAPTER_LED_ON);
4316 			writeq(val64, &bar0->adapter_control);
4317 		}
4318 	}
4319 	val64 = readq(&bar0->gpio_int_mask);
4320 }
4321 
4322 /**
 *  do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4324  *  @value: alarm bits
4325  *  @addr: address value
4326  *  @cnt: counter variable
4327  *  Description: Check for alarm and increment the counter
4328  *  Return Value:
4329  *  1 - if alarm bit set
4330  *  0 - if alarm bit is not set
4331  */
4332 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4333 				 unsigned long long *cnt)
4334 {
4335 	u64 val64;
4336 	val64 = readq(addr);
4337 	if (val64 & value) {
4338 		writeq(val64, addr);
4339 		(*cnt)++;
4340 		return 1;
4341 	}
	return 0;
}
4345 
4346 /**
4347  *  s2io_handle_errors - Xframe error indication handler
4348  *  @nic: device private variable
4349  *  Description: Handle alarms such as loss of link, single or
4350  *  double ECC errors, critical and serious errors.
4351  *  Return Value:
4352  *  NONE
4353  */
4354 static void s2io_handle_errors(void *dev_id)
4355 {
4356 	struct net_device *dev = (struct net_device *)dev_id;
4357 	struct s2io_nic *sp = netdev_priv(dev);
4358 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4359 	u64 temp64 = 0, val64 = 0;
4360 	int i = 0;
4361 
4362 	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4363 	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4364 
4365 	if (!is_s2io_card_up(sp))
4366 		return;
4367 
4368 	if (pci_channel_offline(sp->pdev))
4369 		return;
4370 
4371 	memset(&sw_stat->ring_full_cnt, 0,
4372 	       sizeof(sw_stat->ring_full_cnt));
4373 
4374 	/* Handling the XPAK counters update */
4375 	if (stats->xpak_timer_count < 72000) {
4376 		/* waiting for an hour */
4377 		stats->xpak_timer_count++;
4378 	} else {
4379 		s2io_updt_xpak_counter(dev);
4380 		/* reset the count to zero */
4381 		stats->xpak_timer_count = 0;
4382 	}
4383 
4384 	/* Handling link status change error Intr */
4385 	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4386 		val64 = readq(&bar0->mac_rmac_err_reg);
4387 		writeq(val64, &bar0->mac_rmac_err_reg);
4388 		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4389 			schedule_work(&sp->set_link_task);
4390 	}
4391 
4392 	/* In case of a serious error, the device will be Reset. */
4393 	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4394 				  &sw_stat->serious_err_cnt))
4395 		goto reset;
4396 
4397 	/* Check for data parity error */
4398 	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4399 				  &sw_stat->parity_err_cnt))
4400 		goto reset;
4401 
4402 	/* Check for ring full counter */
4403 	if (sp->device_type == XFRAME_II_DEVICE) {
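		/* Each bump counter register packs four 16-bit per-ring
		 * full counts.
		 */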
4404 		val64 = readq(&bar0->ring_bump_counter1);
4405 		for (i = 0; i < 4; i++) {
4406 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4407 			temp64 >>= 64 - ((i+1)*16);
4408 			sw_stat->ring_full_cnt[i] += temp64;
4409 		}
4410 
4411 		val64 = readq(&bar0->ring_bump_counter2);
4412 		for (i = 0; i < 4; i++) {
4413 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4414 			temp64 >>= 64 - ((i+1)*16);
4415 			sw_stat->ring_full_cnt[i+4] += temp64;
4416 		}
4417 	}
4418 
4419 	val64 = readq(&bar0->txdma_int_status);
	/* check for pfc_err */
4421 	if (val64 & TXDMA_PFC_INT) {
4422 		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4423 					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4424 					  PFC_PCIX_ERR,
4425 					  &bar0->pfc_err_reg,
4426 					  &sw_stat->pfc_err_cnt))
4427 			goto reset;
4428 		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4429 				      &bar0->pfc_err_reg,
4430 				      &sw_stat->pfc_err_cnt);
4431 	}
4432 
	/* check for tda_err */
4434 	if (val64 & TXDMA_TDA_INT) {
4435 		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4436 					  TDA_SM0_ERR_ALARM |
4437 					  TDA_SM1_ERR_ALARM,
4438 					  &bar0->tda_err_reg,
4439 					  &sw_stat->tda_err_cnt))
4440 			goto reset;
4441 		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4442 				      &bar0->tda_err_reg,
4443 				      &sw_stat->tda_err_cnt);
4444 	}
	/* check for pcc_err */
4446 	if (val64 & TXDMA_PCC_INT) {
4447 		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4448 					  PCC_N_SERR | PCC_6_COF_OV_ERR |
4449 					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4450 					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4451 					  PCC_TXB_ECC_DB_ERR,
4452 					  &bar0->pcc_err_reg,
4453 					  &sw_stat->pcc_err_cnt))
4454 			goto reset;
4455 		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4456 				      &bar0->pcc_err_reg,
4457 				      &sw_stat->pcc_err_cnt);
4458 	}
4459 
	/* check for tti_err */
4461 	if (val64 & TXDMA_TTI_INT) {
4462 		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4463 					  &bar0->tti_err_reg,
4464 					  &sw_stat->tti_err_cnt))
4465 			goto reset;
4466 		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4467 				      &bar0->tti_err_reg,
4468 				      &sw_stat->tti_err_cnt);
4469 	}
4470 
	/* check for lso_err */
4472 	if (val64 & TXDMA_LSO_INT) {
4473 		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4474 					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4475 					  &bar0->lso_err_reg,
4476 					  &sw_stat->lso_err_cnt))
4477 			goto reset;
4478 		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4479 				      &bar0->lso_err_reg,
4480 				      &sw_stat->lso_err_cnt);
4481 	}
4482 
	/* check for tpa_err */
4484 	if (val64 & TXDMA_TPA_INT) {
4485 		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4486 					  &bar0->tpa_err_reg,
4487 					  &sw_stat->tpa_err_cnt))
4488 			goto reset;
4489 		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4490 				      &bar0->tpa_err_reg,
4491 				      &sw_stat->tpa_err_cnt);
4492 	}
4493 
	/* check for sm_err */
4495 	if (val64 & TXDMA_SM_INT) {
4496 		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4497 					  &bar0->sm_err_reg,
4498 					  &sw_stat->sm_err_cnt))
4499 			goto reset;
4500 	}
4501 
4502 	val64 = readq(&bar0->mac_int_status);
4503 	if (val64 & MAC_INT_STATUS_TMAC_INT) {
4504 		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4505 					  &bar0->mac_tmac_err_reg,
4506 					  &sw_stat->mac_tmac_err_cnt))
4507 			goto reset;
4508 		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4509 				      TMAC_DESC_ECC_SG_ERR |
4510 				      TMAC_DESC_ECC_DB_ERR,
4511 				      &bar0->mac_tmac_err_reg,
4512 				      &sw_stat->mac_tmac_err_cnt);
4513 	}
4514 
4515 	val64 = readq(&bar0->xgxs_int_status);
4516 	if (val64 & XGXS_INT_STATUS_TXGXS) {
4517 		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4518 					  &bar0->xgxs_txgxs_err_reg,
4519 					  &sw_stat->xgxs_txgxs_err_cnt))
4520 			goto reset;
4521 		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4522 				      &bar0->xgxs_txgxs_err_reg,
4523 				      &sw_stat->xgxs_txgxs_err_cnt);
4524 	}
4525 
4526 	val64 = readq(&bar0->rxdma_int_status);
4527 	if (val64 & RXDMA_INT_RC_INT_M) {
4528 		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4529 					  RC_FTC_ECC_DB_ERR |
4530 					  RC_PRCn_SM_ERR_ALARM |
4531 					  RC_FTC_SM_ERR_ALARM,
4532 					  &bar0->rc_err_reg,
4533 					  &sw_stat->rc_err_cnt))
4534 			goto reset;
4535 		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4536 				      RC_FTC_ECC_SG_ERR |
4537 				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4538 				      &sw_stat->rc_err_cnt);
4539 		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4540 					  PRC_PCI_AB_WR_Rn |
4541 					  PRC_PCI_AB_F_WR_Rn,
4542 					  &bar0->prc_pcix_err_reg,
4543 					  &sw_stat->prc_pcix_err_cnt))
4544 			goto reset;
4545 		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4546 				      PRC_PCI_DP_WR_Rn |
4547 				      PRC_PCI_DP_F_WR_Rn,
4548 				      &bar0->prc_pcix_err_reg,
4549 				      &sw_stat->prc_pcix_err_cnt);
4550 	}
4551 
4552 	if (val64 & RXDMA_INT_RPA_INT_M) {
4553 		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4554 					  &bar0->rpa_err_reg,
4555 					  &sw_stat->rpa_err_cnt))
4556 			goto reset;
4557 		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4558 				      &bar0->rpa_err_reg,
4559 				      &sw_stat->rpa_err_cnt);
4560 	}
4561 
4562 	if (val64 & RXDMA_INT_RDA_INT_M) {
4563 		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4564 					  RDA_FRM_ECC_DB_N_AERR |
4565 					  RDA_SM1_ERR_ALARM |
4566 					  RDA_SM0_ERR_ALARM |
4567 					  RDA_RXD_ECC_DB_SERR,
4568 					  &bar0->rda_err_reg,
4569 					  &sw_stat->rda_err_cnt))
4570 			goto reset;
4571 		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4572 				      RDA_FRM_ECC_SG_ERR |
4573 				      RDA_MISC_ERR |
4574 				      RDA_PCIX_ERR,
4575 				      &bar0->rda_err_reg,
4576 				      &sw_stat->rda_err_cnt);
4577 	}
4578 
4579 	if (val64 & RXDMA_INT_RTI_INT_M) {
4580 		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4581 					  &bar0->rti_err_reg,
4582 					  &sw_stat->rti_err_cnt))
4583 			goto reset;
4584 		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4585 				      &bar0->rti_err_reg,
4586 				      &sw_stat->rti_err_cnt);
4587 	}
4588 
4589 	val64 = readq(&bar0->mac_int_status);
4590 	if (val64 & MAC_INT_STATUS_RMAC_INT) {
4591 		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4592 					  &bar0->mac_rmac_err_reg,
4593 					  &sw_stat->mac_rmac_err_cnt))
4594 			goto reset;
4595 		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4596 				      RMAC_SINGLE_ECC_ERR |
4597 				      RMAC_DOUBLE_ECC_ERR,
4598 				      &bar0->mac_rmac_err_reg,
4599 				      &sw_stat->mac_rmac_err_cnt);
4600 	}
4601 
4602 	val64 = readq(&bar0->xgxs_int_status);
4603 	if (val64 & XGXS_INT_STATUS_RXGXS) {
4604 		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4605 					  &bar0->xgxs_rxgxs_err_reg,
4606 					  &sw_stat->xgxs_rxgxs_err_cnt))
4607 			goto reset;
4608 	}
4609 
4610 	val64 = readq(&bar0->mc_int_status);
4611 	if (val64 & MC_INT_STATUS_MC_INT) {
4612 		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4613 					  &bar0->mc_err_reg,
4614 					  &sw_stat->mc_err_cnt))
4615 			goto reset;
4616 
		/* Handling ECC errors */
4618 		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4619 			writeq(val64, &bar0->mc_err_reg);
4620 			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4621 				sw_stat->double_ecc_errs++;
4622 				if (sp->device_type != XFRAME_II_DEVICE) {
4623 					/*
4624 					 * Reset XframeI only if critical error
4625 					 */
4626 					if (val64 &
4627 					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4628 					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4629 						goto reset;
4630 				}
4631 			} else
4632 				sw_stat->single_ecc_errs++;
4633 		}
4634 	}
4635 	return;
4636 
4637 reset:
4638 	s2io_stop_all_tx_queue(sp);
4639 	schedule_work(&sp->rst_timer_task);
4640 	sw_stat->soft_reset_cnt++;
4641 }
4642 
4643 /**
 *  s2io_isr - ISR handler of the device.
4645  *  @irq: the irq of the device.
4646  *  @dev_id: a void pointer to the dev structure of the NIC.
4647  *  Description:  This function is the ISR handler of the device. It
4648  *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
4650  *  recv buffers, if their numbers are below the panic value which is
4651  *  presently set to 25% of the original number of rcv buffers allocated.
4652  *  Return value:
4653  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4654  *   IRQ_NONE: will be returned if interrupt is not from our device
4655  */
4656 static irqreturn_t s2io_isr(int irq, void *dev_id)
4657 {
4658 	struct net_device *dev = (struct net_device *)dev_id;
4659 	struct s2io_nic *sp = netdev_priv(dev);
4660 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4661 	int i;
4662 	u64 reason = 0;
4663 	struct mac_info *mac_control;
4664 	struct config_param *config;
4665 
4666 	/* Pretend we handled any irq's from a disconnected card */
4667 	if (pci_channel_offline(sp->pdev))
4668 		return IRQ_NONE;
4669 
4670 	if (!is_s2io_card_up(sp))
4671 		return IRQ_NONE;
4672 
4673 	config = &sp->config;
4674 	mac_control = &sp->mac_control;
4675 
4676 	/*
4677 	 * Identify the cause for interrupt and call the appropriate
4678 	 * interrupt handler. Causes for the interrupt could be;
4679 	 * 1. Rx of packet.
4680 	 * 2. Tx complete.
4681 	 * 3. Link down.
4682 	 */
4683 	reason = readq(&bar0->general_int_status);
4684 
4685 	if (unlikely(reason == S2IO_MINUS_ONE))
4686 		return IRQ_HANDLED;	/* Nothing much can be done. Get out */
4687 
4688 	if (reason &
4689 	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4690 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4691 
4692 		if (config->napi) {
4693 			if (reason & GEN_INTR_RXTRAFFIC) {
4694 				napi_schedule(&sp->napi);
4695 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4696 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4697 				readl(&bar0->rx_traffic_int);
4698 			}
4699 		} else {
4700 			/*
4701 			 * rx_traffic_int reg is an R1 register, writing all 1's
4702 			 * will ensure that the actual interrupt causing bit
			 * gets cleared and hence a read can be avoided.
4704 			 */
4705 			if (reason & GEN_INTR_RXTRAFFIC)
4706 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4707 
4708 			for (i = 0; i < config->rx_ring_num; i++) {
4709 				struct ring_info *ring = &mac_control->rings[i];
4710 
4711 				rx_intr_handler(ring, 0);
4712 			}
4713 		}
4714 
4715 		/*
4716 		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit gets
4718 		 * cleared and hence a read can be avoided.
4719 		 */
4720 		if (reason & GEN_INTR_TXTRAFFIC)
4721 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4722 
4723 		for (i = 0; i < config->tx_fifo_num; i++)
4724 			tx_intr_handler(&mac_control->fifos[i]);
4725 
4726 		if (reason & GEN_INTR_TXPIC)
4727 			s2io_txpic_intr_handle(sp);
4728 
4729 		/*
4730 		 * Reallocate the buffers from the interrupt handler itself.
4731 		 */
4732 		if (!config->napi) {
4733 			for (i = 0; i < config->rx_ring_num; i++) {
4734 				struct ring_info *ring = &mac_control->rings[i];
4735 
4736 				s2io_chk_rx_buffers(sp, ring);
4737 			}
4738 		}
4739 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4740 		readl(&bar0->general_int_status);
4741 
4742 		return IRQ_HANDLED;
4743 
4744 	} else if (!reason) {
4745 		/* The interrupt was not raised by us */
4746 		return IRQ_NONE;
4747 	}
4748 
4749 	return IRQ_HANDLED;
4750 }
4751 
/**
 * s2io_updt_stats - triggers a hardware statistics update
 * @sp: private member of the device structure.
 * Description: Requests a one-shot DMA of the hardware statistics block
 * and polls briefly for completion.
 */
4755 static void s2io_updt_stats(struct s2io_nic *sp)
4756 {
4757 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4758 	u64 val64;
4759 	int cnt = 0;
4760 
4761 	if (is_s2io_card_up(sp)) {
		/* Approx 30us on a 133 MHz bus */
4763 		val64 = SET_UPDT_CLICKS(10) |
4764 			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4765 		writeq(val64, &bar0->stat_cfg);
4766 		do {
4767 			udelay(100);
4768 			val64 = readq(&bar0->stat_cfg);
4769 			if (!(val64 & s2BIT(0)))
4770 				break;
4771 			cnt++;
4772 			if (cnt == 5)
4773 				break; /* Updt failed */
4774 		} while (1);
4775 	}
4776 }
4777 
4778 /**
4779  *  s2io_get_stats - Updates the device statistics structure.
4780  *  @dev : pointer to the device structure.
4781  *  Description:
4782  *  This function updates the device statistics structure in the s2io_nic
4783  *  structure and returns a pointer to the same.
4784  *  Return value:
4785  *  pointer to the updated net_device_stats structure.
4786  */
4787 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4788 {
4789 	struct s2io_nic *sp = netdev_priv(dev);
4790 	struct mac_info *mac_control = &sp->mac_control;
4791 	struct stat_block *stats = mac_control->stats_info;
4792 	u64 delta;
4793 
4794 	/* Configure Stats for immediate updt */
4795 	s2io_updt_stats(sp);
4796 
4797 	/* A device reset will cause the on-adapter statistics to be zero'ed.
4798 	 * This can be done while running by changing the MTU.  To prevent the
4799 	 * system from having the stats zero'ed, the driver keeps a copy of the
4800 	 * last update to the system (which is also zero'ed on reset).  This
4801 	 * enables the driver to accurately know the delta between the last
4802 	 * update and the current update.
4803 	 */
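	/* Each 64-bit frame/octet count is split across a 32-bit counter
	 * and a 32-bit overflow register holding the upper half.
	 */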
4804 	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4805 		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4806 	sp->stats.rx_packets += delta;
4807 	dev->stats.rx_packets += delta;
4808 
4809 	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4810 		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4811 	sp->stats.tx_packets += delta;
4812 	dev->stats.tx_packets += delta;
4813 
4814 	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4815 		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4816 	sp->stats.rx_bytes += delta;
4817 	dev->stats.rx_bytes += delta;
4818 
4819 	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4820 		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4821 	sp->stats.tx_bytes += delta;
4822 	dev->stats.tx_bytes += delta;
4823 
4824 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4825 	sp->stats.rx_errors += delta;
4826 	dev->stats.rx_errors += delta;
4827 
4828 	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4829 		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4830 	sp->stats.tx_errors += delta;
4831 	dev->stats.tx_errors += delta;
4832 
4833 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4834 	sp->stats.rx_dropped += delta;
4835 	dev->stats.rx_dropped += delta;
4836 
4837 	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4838 	sp->stats.tx_dropped += delta;
4839 	dev->stats.tx_dropped += delta;
4840 
4841 	/* The adapter MAC interprets pause frames as multicast packets, but
4842 	 * does not pass them up.  This erroneously increases the multicast
4843 	 * packet count and needs to be deducted when the multicast frame count
4844 	 * is queried.
4845 	 */
4846 	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4847 		le32_to_cpu(stats->rmac_vld_mcst_frms);
4848 	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4849 	delta -= sp->stats.multicast;
4850 	sp->stats.multicast += delta;
4851 	dev->stats.multicast += delta;
4852 
4853 	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4854 		le32_to_cpu(stats->rmac_usized_frms)) +
4855 		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4856 	sp->stats.rx_length_errors += delta;
4857 	dev->stats.rx_length_errors += delta;
4858 
4859 	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4860 	sp->stats.rx_crc_errors += delta;
4861 	dev->stats.rx_crc_errors += delta;
4862 
4863 	return &dev->stats;
4864 }
4865 
4866 /**
4867  *  s2io_set_multicast - entry point for multicast address enable/disable.
4868  *  @dev : pointer to the device structure
4869  *  Description:
4870  *  This function is a driver entry point which gets called by the kernel
4871  *  whenever multicast addresses must be enabled/disabled. This also gets
 *  called to set/reset promiscuous mode. Depending on the device flags,
 *  we determine whether multicast addresses must be enabled or whether
 *  promiscuous mode is to be disabled, etc.
4875  *  Return value:
4876  *  void.
4877  */
4878 
4879 static void s2io_set_multicast(struct net_device *dev)
4880 {
4881 	int i, j, prev_cnt;
4882 	struct netdev_hw_addr *ha;
4883 	struct s2io_nic *sp = netdev_priv(dev);
4884 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4885 	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4886 		0xfeffffffffffULL;
4887 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4888 	void __iomem *add;
4889 	struct config_param *config = &sp->config;
4890 
4891 	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4892 		/*  Enable all Multicast addresses */
4893 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4894 		       &bar0->rmac_addr_data0_mem);
4895 		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4896 		       &bar0->rmac_addr_data1_mem);
4897 		val64 = RMAC_ADDR_CMD_MEM_WE |
4898 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4899 			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4900 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4901 		/* Wait till command completes */
4902 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4903 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4904 				      S2IO_BIT_RESET);
4905 
4906 		sp->m_cast_flg = 1;
4907 		sp->all_multi_pos = config->max_mc_addr - 1;
4908 	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4909 		/*  Disable all Multicast addresses */
4910 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4911 		       &bar0->rmac_addr_data0_mem);
4912 		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4913 		       &bar0->rmac_addr_data1_mem);
4914 		val64 = RMAC_ADDR_CMD_MEM_WE |
4915 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4916 			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4917 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4918 		/* Wait till command completes */
4919 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4920 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4921 				      S2IO_BIT_RESET);
4922 
4923 		sp->m_cast_flg = 0;
4924 		sp->all_multi_pos = 0;
4925 	}
4926 
4927 	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4928 		/*  Put the NIC into promiscuous mode */
4929 		add = &bar0->mac_cfg;
4930 		val64 = readq(&bar0->mac_cfg);
4931 		val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4932 
4933 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4934 		writel((u32)val64, add);
4935 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4936 		writel((u32) (val64 >> 32), (add + 4));
4937 
4938 		if (vlan_tag_strip != 1) {
4939 			val64 = readq(&bar0->rx_pa_cfg);
4940 			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4941 			writeq(val64, &bar0->rx_pa_cfg);
4942 			sp->vlan_strip_flag = 0;
4943 		}
4944 
4945 		val64 = readq(&bar0->mac_cfg);
4946 		sp->promisc_flg = 1;
4947 		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4948 			  dev->name);
4949 	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4950 		/*  Remove the NIC from promiscuous mode */
4951 		add = &bar0->mac_cfg;
4952 		val64 = readq(&bar0->mac_cfg);
4953 		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4954 
4955 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4956 		writel((u32)val64, add);
4957 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4958 		writel((u32) (val64 >> 32), (add + 4));
4959 
4960 		if (vlan_tag_strip != 0) {
4961 			val64 = readq(&bar0->rx_pa_cfg);
4962 			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4963 			writeq(val64, &bar0->rx_pa_cfg);
4964 			sp->vlan_strip_flag = 1;
4965 		}
4966 
4967 		val64 = readq(&bar0->mac_cfg);
4968 		sp->promisc_flg = 0;
4969 		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
4970 	}
4971 
4972 	/*  Update individual M_CAST address list */
4973 	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
4974 		if (netdev_mc_count(dev) >
4975 		    (config->max_mc_addr - config->max_mac_addr)) {
4976 			DBG_PRINT(ERR_DBG,
4977 				  "%s: No more Rx filters can be added - "
4978 				  "please enable ALL_MULTI instead\n",
4979 				  dev->name);
4980 			return;
4981 		}
4982 
4983 		prev_cnt = sp->mc_addr_count;
4984 		sp->mc_addr_count = netdev_mc_count(dev);
4985 
4986 		/* Clear out the previous list of Mc in the H/W. */
4987 		for (i = 0; i < prev_cnt; i++) {
4988 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4989 			       &bar0->rmac_addr_data0_mem);
4990 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4991 			       &bar0->rmac_addr_data1_mem);
4992 			val64 = RMAC_ADDR_CMD_MEM_WE |
4993 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4994 				RMAC_ADDR_CMD_MEM_OFFSET
4995 				(config->mc_start_offset + i);
4996 			writeq(val64, &bar0->rmac_addr_cmd_mem);
4997 
			/* Wait till command completes */
4999 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5000 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5001 						  S2IO_BIT_RESET)) {
5002 				DBG_PRINT(ERR_DBG,
5003 					  "%s: Adding Multicasts failed\n",
5004 					  dev->name);
5005 				return;
5006 			}
5007 		}
5008 
5009 		/* Create the new Rx filter list and update the same in H/W. */
5010 		i = 0;
5011 		netdev_for_each_mc_addr(ha, dev) {
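			/* Pack the 6-byte address into a u64, most
			 * significant byte first.
			 */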
5012 			mac_addr = 0;
5013 			for (j = 0; j < ETH_ALEN; j++) {
5014 				mac_addr |= ha->addr[j];
5015 				mac_addr <<= 8;
5016 			}
5017 			mac_addr >>= 8;
5018 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5019 			       &bar0->rmac_addr_data0_mem);
5020 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5021 			       &bar0->rmac_addr_data1_mem);
5022 			val64 = RMAC_ADDR_CMD_MEM_WE |
5023 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5024 				RMAC_ADDR_CMD_MEM_OFFSET
5025 				(i + config->mc_start_offset);
5026 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5027 
			/* Wait till command completes */
5029 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5030 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5031 						  S2IO_BIT_RESET)) {
5032 				DBG_PRINT(ERR_DBG,
5033 					  "%s: Adding Multicasts failed\n",
5034 					  dev->name);
5035 				return;
5036 			}
5037 			i++;
5038 		}
5039 	}
5040 }
5041 
5042 /* read from CAM unicast & multicast addresses and store it in
5043  * def_mac_addr structure
5044  */
5045 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5046 {
5047 	int offset;
5048 	u64 mac_addr = 0x0;
5049 	struct config_param *config = &sp->config;
5050 
5051 	/* store unicast & multicast mac addresses */
5052 	for (offset = 0; offset < config->max_mc_addr; offset++) {
5053 		mac_addr = do_s2io_read_unicast_mc(sp, offset);
5054 		/* if read fails disable the entry */
5055 		if (mac_addr == FAILURE)
5056 			mac_addr = S2IO_DISABLE_MAC_ENTRY;
5057 		do_s2io_copy_mac_addr(sp, offset, mac_addr);
5058 	}
5059 }
5060 
5061 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5062 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5063 {
5064 	int offset;
5065 	struct config_param *config = &sp->config;
5066 	/* restore unicast mac address */
5067 	for (offset = 0; offset < config->max_mac_addr; offset++)
5068 		do_s2io_prog_unicast(sp->dev,
5069 				     sp->def_mac_addr[offset].mac_addr);
5070 
5071 	/* restore multicast mac address */
5072 	for (offset = config->mc_start_offset;
5073 	     offset < config->max_mc_addr; offset++)
5074 		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5075 }
5076 
5077 /* add a multicast MAC address to CAM */
5078 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5079 {
5080 	int i;
5081 	u64 mac_addr = 0;
5082 	struct config_param *config = &sp->config;
5083 
5084 	for (i = 0; i < ETH_ALEN; i++) {
5085 		mac_addr <<= 8;
5086 		mac_addr |= addr[i];
5087 	}
5088 	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5089 		return SUCCESS;
5090 
	/* check if the multicast mac is already present in CAM */
5092 	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5093 		u64 tmp64;
5094 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5095 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5096 			break;
5097 
5098 		if (tmp64 == mac_addr)
5099 			return SUCCESS;
5100 	}
5101 	if (i == config->max_mc_addr) {
5102 		DBG_PRINT(ERR_DBG,
5103 			  "CAM full no space left for multicast MAC\n");
5104 		return FAILURE;
5105 	}
5106 	/* Update the internal structure with this new mac address */
5107 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5108 
5109 	return do_s2io_add_mac(sp, mac_addr, i);
5110 }
5111 
5112 /* add MAC address to CAM */
5113 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5114 {
5115 	u64 val64;
5116 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5117 
5118 	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5119 	       &bar0->rmac_addr_data0_mem);
5120 
5121 	val64 =	RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5122 		RMAC_ADDR_CMD_MEM_OFFSET(off);
5123 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5124 
5125 	/* Wait till command completes */
5126 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5127 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5128 				  S2IO_BIT_RESET)) {
5129 		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5130 		return FAILURE;
5131 	}
5132 	return SUCCESS;
5133 }

/* delete a specified unicast/multicast MAC entry from CAM */
5135 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5136 {
5137 	int offset;
5138 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5139 	struct config_param *config = &sp->config;
5140 
5141 	for (offset = 1;
5142 	     offset < config->max_mc_addr; offset++) {
5143 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
5144 		if (tmp64 == addr) {
			/* disable the entry by writing 0xffffffffffffULL */
5146 			if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5147 				return FAILURE;
			/* re-read the updated MAC list from CAM */
5149 			do_s2io_store_unicast_mc(sp);
5150 			return SUCCESS;
5151 		}
5152 	}
5153 	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5154 		  (unsigned long long)addr);
5155 	return FAILURE;
5156 }
5157 
5158 /* read mac entries from CAM */
5159 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5160 {
5161 	u64 tmp64 = 0xffffffffffff0000ULL, val64;
5162 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5163 
5164 	/* read mac addr */
5165 	val64 =	RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5166 		RMAC_ADDR_CMD_MEM_OFFSET(offset);
5167 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5168 
5169 	/* Wait till command completes */
5170 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5171 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5172 				  S2IO_BIT_RESET)) {
5173 		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5174 		return FAILURE;
5175 	}
5176 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
5177 
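	/* the 6-byte MAC address is returned in the upper 48 bits of data0 */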
5178 	return tmp64 >> 16;
5179 }
5180 
5181 /**
5182  * s2io_set_mac_addr - driver entry point
5183  */
5184 
5185 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5186 {
5187 	struct sockaddr *addr = p;
5188 
5189 	if (!is_valid_ether_addr(addr->sa_data))
5190 		return -EADDRNOTAVAIL;
5191 
5192 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5193 
5194 	/* store the MAC address in CAM */
5195 	return do_s2io_prog_unicast(dev, dev->dev_addr);
5196 }
5197 /**
5198  *  do_s2io_prog_unicast - Programs the Xframe mac address
5199  *  @dev : pointer to the device structure.
5200  *  @addr: a uchar pointer to the new mac address which is to be set.
5201  *  Description : This procedure will program the Xframe to receive
5202  *  frames with new Mac Address
5203  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5204  *  as defined in errno.h file on failure.
5205  */
5206 
5207 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5208 {
5209 	struct s2io_nic *sp = netdev_priv(dev);
5210 	register u64 mac_addr = 0, perm_addr = 0;
5211 	int i;
5212 	u64 tmp64;
5213 	struct config_param *config = &sp->config;
5214 
5215 	/*
5216 	 * Set the new MAC address as the new unicast filter and reflect this
5217 	 * change on the device address registered with the OS. It will be
5218 	 * at offset 0.
5219 	 */
5220 	for (i = 0; i < ETH_ALEN; i++) {
5221 		mac_addr <<= 8;
5222 		mac_addr |= addr[i];
5223 		perm_addr <<= 8;
5224 		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5225 	}
5226 
	/* check if the dev_addr is different from perm_addr */
5228 	if (mac_addr == perm_addr)
5229 		return SUCCESS;
5230 
	/* check if the MAC is already present in CAM */
5232 	for (i = 1; i < config->max_mac_addr; i++) {
5233 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5234 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5235 			break;
5236 
5237 		if (tmp64 == mac_addr) {
5238 			DBG_PRINT(INFO_DBG,
5239 				  "MAC addr:0x%llx already present in CAM\n",
5240 				  (unsigned long long)mac_addr);
5241 			return SUCCESS;
5242 		}
5243 	}
5244 	if (i == config->max_mac_addr) {
5245 		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5246 		return FAILURE;
5247 	}
5248 	/* Update the internal structure with this new mac address */
5249 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5250 
5251 	return do_s2io_add_mac(sp, mac_addr, i);
5252 }
5253 
5254 /**
5255  * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5256  * @sp : private member of the device structure, which is a pointer to the
5257  * s2io_nic structure.
5258  * @cmd: pointer to the structure with parameters given by ethtool to set
5259  * link information.
5260  * Description:
5261  * The function sets different link parameters provided by the user onto
5262  * the NIC.
5263  * Return value:
5264  * 0 on success.
5265  */
5266 
5267 static int
5268 s2io_ethtool_set_link_ksettings(struct net_device *dev,
5269 				const struct ethtool_link_ksettings *cmd)
5270 {
	struct s2io_nic *sp = netdev_priv(dev);

	if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
	    (cmd->base.speed != SPEED_10000) ||
	    (cmd->base.duplex != DUPLEX_FULL))
		return -EINVAL;

	s2io_close(sp->dev);
	s2io_open(sp->dev);
5280 
5281 	return 0;
5282 }
5283 
5284 /**
5285  * s2io_ethtol_get_link_ksettings - Return link specific information.
5286  * @sp : private member of the device structure, pointer to the
5287  *      s2io_nic structure.
5288  * @cmd : pointer to the structure with parameters given by ethtool
5289  * to return link information.
5290  * Description:
5291  * Returns link specific information like speed, duplex etc.. to ethtool.
5292  * Return value :
5293  * return 0 on success.
5294  */
5295 
5296 static int
5297 s2io_ethtool_get_link_ksettings(struct net_device *dev,
5298 				struct ethtool_link_ksettings *cmd)
5299 {
5300 	struct s2io_nic *sp = netdev_priv(dev);
5301 
5302 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
5303 	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5304 	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5305 
5306 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5307 	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5308 	ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5309 
5310 	cmd->base.port = PORT_FIBRE;
5311 
5312 	if (netif_carrier_ok(sp->dev)) {
5313 		cmd->base.speed = SPEED_10000;
5314 		cmd->base.duplex = DUPLEX_FULL;
5315 	} else {
5316 		cmd->base.speed = SPEED_UNKNOWN;
5317 		cmd->base.duplex = DUPLEX_UNKNOWN;
5318 	}
5319 
5320 	cmd->base.autoneg = AUTONEG_DISABLE;
5321 	return 0;
5322 }
5323 
5324 /**
5325  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5326  * @sp : private member of the device structure, which is a pointer to the
5327  * s2io_nic structure.
5328  * @info : pointer to the structure with parameters given by ethtool to
5329  * return driver information.
5330  * Description:
5331  * Returns driver specefic information like name, version etc.. to ethtool.
5332  * Return value:
5333  *  void
5334  */
5335 
5336 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5337 				  struct ethtool_drvinfo *info)
5338 {
5339 	struct s2io_nic *sp = netdev_priv(dev);
5340 
5341 	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5342 	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
5343 	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5344 }
5345 
5346 /**
5347  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5348  *  @sp: private member of the device structure, which is a pointer to the
5349  *  s2io_nic structure.
5350  *  @regs : pointer to the structure with parameters given by ethtool for
5351  *  dumping the registers.
5352  *  @reg_space: The input argument into which all the registers are dumped.
5353  *  Description:
5354  *  Dumps the entire register space of xFrame NIC into the user given
5355  *  buffer area.
5356  * Return value :
5357  * void .
5358  */
5359 
5360 static void s2io_ethtool_gregs(struct net_device *dev,
5361 			       struct ethtool_regs *regs, void *space)
5362 {
5363 	int i;
5364 	u64 reg;
5365 	u8 *reg_space = (u8 *)space;
5366 	struct s2io_nic *sp = netdev_priv(dev);
5367 
5368 	regs->len = XENA_REG_SPACE;
5369 	regs->version = sp->pdev->subsystem_device;
5370 
5371 	for (i = 0; i < regs->len; i += 8) {
5372 		reg = readq(sp->bar0 + i);
5373 		memcpy((reg_space + i), &reg, 8);
5374 	}
5375 }
5376 
5377 /*
5378  *  s2io_set_led - control NIC led
5379  */
5380 static void s2io_set_led(struct s2io_nic *sp, bool on)
5381 {
5382 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5383 	u16 subid = sp->pdev->subsystem_device;
5384 	u64 val64;
5385 
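	/*
	 * Xframe II cards and later Xframe I revisions drive the link LED
	 * through GPIO 0; older boards use the LED bit in adapter_control.
	 */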
5386 	if ((sp->device_type == XFRAME_II_DEVICE) ||
5387 	    ((subid & 0xFF) >= 0x07)) {
5388 		val64 = readq(&bar0->gpio_control);
5389 		if (on)
5390 			val64 |= GPIO_CTRL_GPIO_0;
5391 		else
5392 			val64 &= ~GPIO_CTRL_GPIO_0;
5393 
5394 		writeq(val64, &bar0->gpio_control);
5395 	} else {
5396 		val64 = readq(&bar0->adapter_control);
5397 		if (on)
5398 			val64 |= ADAPTER_LED_ON;
5399 		else
5400 			val64 &= ~ADAPTER_LED_ON;
5401 
5402 		writeq(val64, &bar0->adapter_control);
5403 	}
}
5406 
5407 /**
5408  * s2io_ethtool_set_led - To physically identify the nic on the system.
5409  * @dev : network device
5410  * @state: led setting
5411  *
 * Description: Used to physically identify the NIC on the system.
 * The Link LED will blink for a time specified by the user for
 * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if its link is up.
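 * Typically triggered from userspace via 'ethtool -p <dev>'.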
5417  */
5418 
5419 static int s2io_ethtool_set_led(struct net_device *dev,
5420 				enum ethtool_phys_id_state state)
5421 {
5422 	struct s2io_nic *sp = netdev_priv(dev);
5423 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5424 	u16 subid = sp->pdev->subsystem_device;
5425 
5426 	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5427 		u64 val64 = readq(&bar0->adapter_control);
5428 		if (!(val64 & ADAPTER_CNTL_EN)) {
5429 			pr_err("Adapter Link down, cannot blink LED\n");
5430 			return -EAGAIN;
5431 		}
5432 	}
5433 
5434 	switch (state) {
5435 	case ETHTOOL_ID_ACTIVE:
5436 		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5437 		return 1;	/* cycle on/off once per second */
5438 
5439 	case ETHTOOL_ID_ON:
5440 		s2io_set_led(sp, true);
5441 		break;
5442 
5443 	case ETHTOOL_ID_OFF:
5444 		s2io_set_led(sp, false);
5445 		break;
5446 
5447 	case ETHTOOL_ID_INACTIVE:
5448 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5449 			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5450 	}
5451 
5452 	return 0;
5453 }
5454 
5455 static void s2io_ethtool_gringparam(struct net_device *dev,
5456 				    struct ethtool_ringparam *ering)
5457 {
5458 	struct s2io_nic *sp = netdev_priv(dev);
5459 	int i, tx_desc_count = 0, rx_desc_count = 0;
5460 
5461 	if (sp->rxd_mode == RXD_MODE_1) {
5462 		ering->rx_max_pending = MAX_RX_DESC_1;
5463 		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5464 	} else {
5465 		ering->rx_max_pending = MAX_RX_DESC_2;
5466 		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5467 	}
5468 
5469 	ering->tx_max_pending = MAX_TX_DESC;
5470 
5471 	for (i = 0; i < sp->config.rx_ring_num; i++)
5472 		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5473 	ering->rx_pending = rx_desc_count;
5474 	ering->rx_jumbo_pending = rx_desc_count;
5475 
5476 	for (i = 0; i < sp->config.tx_fifo_num; i++)
5477 		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5478 	ering->tx_pending = tx_desc_count;
5479 	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5480 }
5481 
5482 /**
5483  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5484  * @sp : private member of the device structure, which is a pointer to the
5485  *	s2io_nic structure.
5486  * @ep : pointer to the structure with pause parameters given by ethtool.
5487  * Description:
5488  * Returns the Pause frame generation and reception capability of the NIC.
5489  * Return value:
5490  *  void
5491  */
5492 static void s2io_ethtool_getpause_data(struct net_device *dev,
5493 				       struct ethtool_pauseparam *ep)
5494 {
5495 	u64 val64;
5496 	struct s2io_nic *sp = netdev_priv(dev);
5497 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5498 
5499 	val64 = readq(&bar0->rmac_pause_cfg);
5500 	if (val64 & RMAC_PAUSE_GEN_ENABLE)
5501 		ep->tx_pause = true;
5502 	if (val64 & RMAC_PAUSE_RX_ENABLE)
5503 		ep->rx_pause = true;
5504 	ep->autoneg = false;
5505 }
5506 
5507 /**
5508  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5509  * @sp : private member of the device structure, which is a pointer to the
5510  *      s2io_nic structure.
5511  * @ep : pointer to the structure with pause parameters given by ethtool.
5512  * Description:
5513  * It can be used to set or reset Pause frame generation or reception
5514  * support of the NIC.
5515  * Return value:
5516  * int, returns 0 on Success
5517  */
5518 
5519 static int s2io_ethtool_setpause_data(struct net_device *dev,
5520 				      struct ethtool_pauseparam *ep)
5521 {
5522 	u64 val64;
5523 	struct s2io_nic *sp = netdev_priv(dev);
5524 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5525 
5526 	val64 = readq(&bar0->rmac_pause_cfg);
5527 	if (ep->tx_pause)
5528 		val64 |= RMAC_PAUSE_GEN_ENABLE;
5529 	else
5530 		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5531 	if (ep->rx_pause)
5532 		val64 |= RMAC_PAUSE_RX_ENABLE;
5533 	else
5534 		val64 &= ~RMAC_PAUSE_RX_ENABLE;
5535 	writeq(val64, &bar0->rmac_pause_cfg);
5536 	return 0;
5537 }
5538 
5539 /**
5540  * read_eeprom - reads 4 bytes of data from user given offset.
5541  * @sp : private member of the device structure, which is a pointer to the
5542  *      s2io_nic structure.
5543  * @off : offset at which the data must be written
5544  * @data : Its an output parameter where the data read at the given
5545  *	offset is stored.
5546  * Description:
5547  * Will read 4 bytes of data from the user given offset and return the
5548  * read data.
5549  * NOTE: Will allow to read only part of the EEPROM visible through the
5550  *   I2C bus.
5551  * Return value:
5552  *  -1 on failure and 0 on success.
5553  */
5554 
5555 #define S2IO_DEV_ID		5
5556 static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5557 {
5558 	int ret = -1;
5559 	u32 exit_cnt = 0;
5560 	u64 val64;
5561 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5562 
5563 	if (sp->device_type == XFRAME_I_DEVICE) {
5564 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5565 			I2C_CONTROL_ADDR(off) |
5566 			I2C_CONTROL_BYTE_CNT(0x3) |
5567 			I2C_CONTROL_READ |
5568 			I2C_CONTROL_CNTL_START;
5569 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5570 
5571 		while (exit_cnt < 5) {
5572 			val64 = readq(&bar0->i2c_control);
5573 			if (I2C_CONTROL_CNTL_END(val64)) {
5574 				*data = I2C_CONTROL_GET_DATA(val64);
5575 				ret = 0;
5576 				break;
5577 			}
5578 			msleep(50);
5579 			exit_cnt++;
5580 		}
5581 	}
5582 
5583 	if (sp->device_type == XFRAME_II_DEVICE) {
5584 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5585 			SPI_CONTROL_BYTECNT(0x3) |
5586 			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5587 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5588 		val64 |= SPI_CONTROL_REQ;
5589 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5590 		while (exit_cnt < 5) {
5591 			val64 = readq(&bar0->spi_control);
5592 			if (val64 & SPI_CONTROL_NACK) {
5593 				ret = 1;
5594 				break;
5595 			} else if (val64 & SPI_CONTROL_DONE) {
5596 				*data = readq(&bar0->spi_data);
5597 				*data &= 0xffffff;
5598 				ret = 0;
5599 				break;
5600 			}
5601 			msleep(50);
5602 			exit_cnt++;
5603 		}
5604 	}
5605 	return ret;
5606 }
5607 
5608 /**
5609  *  write_eeprom - actually writes the relevant part of the data value.
5610  *  @sp : private member of the device structure, which is a pointer to the
5611  *       s2io_nic structure.
5612  *  @off : offset at which the data must be written
5613  *  @data : The data that is to be written
5614  *  @cnt : Number of bytes of the data that are actually to be written into
5615  *  the Eeprom. (max of 3)
5616  * Description:
5617  *  Actually writes the relevant part of the data value into the Eeprom
5618  *  through the I2C bus.
5619  * Return value:
5620  *  0 on success, -1 on failure.
5621  */
5622 
5623 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5624 {
5625 	int exit_cnt = 0, ret = -1;
5626 	u64 val64;
5627 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5628 
5629 	if (sp->device_type == XFRAME_I_DEVICE) {
5630 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5631 			I2C_CONTROL_ADDR(off) |
5632 			I2C_CONTROL_BYTE_CNT(cnt) |
5633 			I2C_CONTROL_SET_DATA((u32)data) |
5634 			I2C_CONTROL_CNTL_START;
5635 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5636 
5637 		while (exit_cnt < 5) {
5638 			val64 = readq(&bar0->i2c_control);
5639 			if (I2C_CONTROL_CNTL_END(val64)) {
5640 				if (!(val64 & I2C_CONTROL_NACK))
5641 					ret = 0;
5642 				break;
5643 			}
5644 			msleep(50);
5645 			exit_cnt++;
5646 		}
5647 	}
5648 
5649 	if (sp->device_type == XFRAME_II_DEVICE) {
5650 		int write_cnt = (cnt == 8) ? 0 : cnt;
5651 		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5652 
5653 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5654 			SPI_CONTROL_BYTECNT(write_cnt) |
5655 			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5656 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5657 		val64 |= SPI_CONTROL_REQ;
5658 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5659 		while (exit_cnt < 5) {
5660 			val64 = readq(&bar0->spi_control);
5661 			if (val64 & SPI_CONTROL_NACK) {
5662 				ret = 1;
5663 				break;
5664 			} else if (val64 & SPI_CONTROL_DONE) {
5665 				ret = 0;
5666 				break;
5667 			}
5668 			msleep(50);
5669 			exit_cnt++;
5670 		}
5671 	}
5672 	return ret;
5673 }
5674 static void s2io_vpd_read(struct s2io_nic *nic)
5675 {
5676 	u8 *vpd_data;
5677 	u8 data;
5678 	int i = 0, cnt, len, fail = 0;
5679 	int vpd_addr = 0x80;
5680 	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5681 
5682 	if (nic->device_type == XFRAME_II_DEVICE) {
5683 		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5684 		vpd_addr = 0x80;
5685 	} else {
5686 		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5687 		vpd_addr = 0x50;
5688 	}
5689 	strcpy(nic->serial_num, "NOT AVAILABLE");
5690 
5691 	vpd_data = kmalloc(256, GFP_KERNEL);
5692 	if (!vpd_data) {
5693 		swstats->mem_alloc_fail_cnt++;
5694 		return;
5695 	}
5696 	swstats->mem_allocated += 256;
5697 
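	/*
	 * Standard PCI VPD access: write the word address with the flag
	 * byte cleared, then poll until hardware sets the flag (0x80),
	 * indicating the 32-bit data register holds valid data.
	 */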
5698 	for (i = 0; i < 256; i += 4) {
5699 		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5700 		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5701 		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5702 		for (cnt = 0; cnt < 5; cnt++) {
5703 			msleep(2);
5704 			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5705 			if (data == 0x80)
5706 				break;
5707 		}
5708 		if (cnt >= 5) {
5709 			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5710 			fail = 1;
5711 			break;
5712 		}
5713 		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5714 				      (u32 *)&vpd_data[i]);
5715 	}
5716 
5717 	if (!fail) {
5718 		/* read serial number of adapter */
		for (cnt = 0; cnt < 252; cnt++) {
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt + 1] == 'N')) {
				len = vpd_data[cnt + 2];
				if (len < min(VPD_STRING_LEN, 256 - cnt - 2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num + len,
					       0,
					       VPD_STRING_LEN - len);
5730 					break;
5731 				}
5732 			}
5733 		}
5734 	}
5735 
5736 	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5737 		len = vpd_data[1];
5738 		memcpy(nic->product_name, &vpd_data[3], len);
5739 		nic->product_name[len] = 0;
5740 	}
5741 	kfree(vpd_data);
5742 	swstats->mem_freed += 256;
5743 }
5744 
5745 /**
5746  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5747  *  @sp : private member of the device structure, which is a pointer to the
5748  *  s2io_nic structure.
5749  *  @eeprom : pointer to the user level structure provided by ethtool,
5750  *  containing all relevant information.
5751  *  @data_buf : user defined value to be written into Eeprom.
5752  *  Description: Reads the values stored in the Eeprom at given offset
5753  *  for a given length. Stores these values int the input argument data
5754  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5755  *  Return value:
5756  *  int  0 on success
5757  */
5758 
static int s2io_ethtool_geeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data_buf)
5761 {
5762 	u32 i, valid;
5763 	u64 data;
5764 	struct s2io_nic *sp = netdev_priv(dev);
5765 
5766 	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5767 
5768 	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5769 		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5770 
5771 	for (i = 0; i < eeprom->len; i += 4) {
5772 		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5773 			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5774 			return -EFAULT;
5775 		}
5776 		valid = INV(data);
5777 		memcpy((data_buf + i), &valid, 4);
5778 	}
5779 	return 0;
5780 }
5781 
5782 /**
5783  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5784  *  @sp : private member of the device structure, which is a pointer to the
5785  *  s2io_nic structure.
5786  *  @eeprom : pointer to the user level structure provided by ethtool,
5787  *  containing all relevant information.
5788  *  @data_buf ; user defined value to be written into Eeprom.
5789  *  Description:
5790  *  Tries to write the user provided value in the Eeprom, at the offset
5791  *  given by the user.
5792  *  Return value:
5793  *  0 on success, -EFAULT on failure.
5794  */
5795 
5796 static int s2io_ethtool_seeprom(struct net_device *dev,
5797 				struct ethtool_eeprom *eeprom,
5798 				u8 *data_buf)
5799 {
5800 	int len = eeprom->len, cnt = 0;
5801 	u64 valid = 0, data;
5802 	struct s2io_nic *sp = netdev_priv(dev);
5803 
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: "
			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
			  eeprom->magic,
			  (sp->pdev->vendor | (sp->pdev->device << 16)));
		return -EFAULT;
	}
5812 
5813 	while (len) {
5814 		data = (u32)data_buf[cnt] & 0x000000FF;
5815 		if (data)
5816 			valid = (u32)(data << 24);
5817 		else
5818 			valid = data;
5819 
5820 		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5821 			DBG_PRINT(ERR_DBG,
5822 				  "ETHTOOL_WRITE_EEPROM Err: "
5823 				  "Cannot write into the specified offset\n");
5824 			return -EFAULT;
5825 		}
5826 		cnt++;
5827 		len--;
5828 	}
5829 
5830 	return 0;
5831 }
5832 
5833 /**
5834  * s2io_register_test - reads and writes into all clock domains.
5835  * @sp : private member of the device structure, which is a pointer to the
5836  * s2io_nic structure.
5837  * @data : variable that returns the result of each of the test conducted b
5838  * by the driver.
5839  * Description:
5840  * Read and write into all clock domains. The NIC has 3 clock domains,
5841  * see that registers in all the three regions are accessible.
5842  * Return value:
5843  * 0 on success.
5844  */
5845 
5846 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5847 {
5848 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5849 	u64 val64 = 0, exp_val;
5850 	int fail = 0;
5851 
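	/*
	 * pif_rd_swapper_fb returns a fixed feedback pattern; reading
	 * anything else means the byte swapper is misprogrammed.
	 */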
5852 	val64 = readq(&bar0->pif_rd_swapper_fb);
5853 	if (val64 != 0x123456789abcdefULL) {
5854 		fail = 1;
5855 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5856 	}
5857 
5858 	val64 = readq(&bar0->rmac_pause_cfg);
5859 	if (val64 != 0xc000ffff00000000ULL) {
5860 		fail = 1;
5861 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5862 	}
5863 
5864 	val64 = readq(&bar0->rx_queue_cfg);
5865 	if (sp->device_type == XFRAME_II_DEVICE)
5866 		exp_val = 0x0404040404040404ULL;
5867 	else
5868 		exp_val = 0x0808080808080808ULL;
5869 	if (val64 != exp_val) {
5870 		fail = 1;
5871 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5872 	}
5873 
5874 	val64 = readq(&bar0->xgxs_efifo_cfg);
5875 	if (val64 != 0x000000001923141EULL) {
5876 		fail = 1;
5877 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5878 	}
5879 
5880 	val64 = 0x5A5A5A5A5A5A5A5AULL;
5881 	writeq(val64, &bar0->xmsi_data);
5882 	val64 = readq(&bar0->xmsi_data);
5883 	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5884 		fail = 1;
5885 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5886 	}
5887 
5888 	val64 = 0xA5A5A5A5A5A5A5A5ULL;
5889 	writeq(val64, &bar0->xmsi_data);
5890 	val64 = readq(&bar0->xmsi_data);
5891 	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5892 		fail = 1;
5893 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5894 	}
5895 
5896 	*data = fail;
5897 	return fail;
5898 }
5899 
5900 /**
5901  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5902  * @sp : private member of the device structure, which is a pointer to the
5903  * s2io_nic structure.
5904  * @data:variable that returns the result of each of the test conducted by
5905  * the driver.
5906  * Description:
5907  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5908  * register.
5909  * Return value:
5910  * 0 on success.
5911  */
5912 
5913 static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5914 {
5915 	int fail = 0;
5916 	u64 ret_data, org_4F0, org_7F0;
5917 	u8 saved_4F0 = 0, saved_7F0 = 0;
5918 	struct net_device *dev = sp->dev;
5919 
	/* Test Write Error at offset 0 */
	/* Note that the SPI interface allows write access to all areas
	 * of the EEPROM. Hence negative testing is done only for Xframe I.
	 */
5924 	if (sp->device_type == XFRAME_I_DEVICE)
5925 		if (!write_eeprom(sp, 0, 0, 3))
5926 			fail = 1;
5927 
5928 	/* Save current values at offsets 0x4F0 and 0x7F0 */
5929 	if (!read_eeprom(sp, 0x4F0, &org_4F0))
5930 		saved_4F0 = 1;
5931 	if (!read_eeprom(sp, 0x7F0, &org_7F0))
5932 		saved_7F0 = 1;
5933 
5934 	/* Test Write at offset 4f0 */
5935 	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5936 		fail = 1;
5937 	if (read_eeprom(sp, 0x4F0, &ret_data))
5938 		fail = 1;
5939 
5940 	if (ret_data != 0x012345) {
5941 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5942 			  "Data written %llx Data read %llx\n",
5943 			  dev->name, (unsigned long long)0x12345,
5944 			  (unsigned long long)ret_data);
5945 		fail = 1;
5946 	}
5947 
	/* Reset the EEPROM data to 0xFFFFFF */
5949 	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5950 
5951 	/* Test Write Request Error at offset 0x7c */
5952 	if (sp->device_type == XFRAME_I_DEVICE)
5953 		if (!write_eeprom(sp, 0x07C, 0, 3))
5954 			fail = 1;
5955 
5956 	/* Test Write Request at offset 0x7f0 */
5957 	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5958 		fail = 1;
5959 	if (read_eeprom(sp, 0x7F0, &ret_data))
5960 		fail = 1;
5961 
5962 	if (ret_data != 0x012345) {
5963 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5964 			  "Data written %llx Data read %llx\n",
5965 			  dev->name, (unsigned long long)0x12345,
5966 			  (unsigned long long)ret_data);
5967 		fail = 1;
5968 	}
5969 
	/* Reset the EEPROM data to 0xFFFFFF */
5971 	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5972 
5973 	if (sp->device_type == XFRAME_I_DEVICE) {
5974 		/* Test Write Error at offset 0x80 */
5975 		if (!write_eeprom(sp, 0x080, 0, 3))
5976 			fail = 1;
5977 
5978 		/* Test Write Error at offset 0xfc */
5979 		if (!write_eeprom(sp, 0x0FC, 0, 3))
5980 			fail = 1;
5981 
5982 		/* Test Write Error at offset 0x100 */
5983 		if (!write_eeprom(sp, 0x100, 0, 3))
5984 			fail = 1;
5985 
5986 		/* Test Write Error at offset 4ec */
5987 		if (!write_eeprom(sp, 0x4EC, 0, 3))
5988 			fail = 1;
5989 	}
5990 
5991 	/* Restore values at offsets 0x4F0 and 0x7F0 */
5992 	if (saved_4F0)
5993 		write_eeprom(sp, 0x4F0, org_4F0, 3);
5994 	if (saved_7F0)
5995 		write_eeprom(sp, 0x7F0, org_7F0, 3);
5996 
5997 	*data = fail;
5998 	return fail;
5999 }
6000 
6001 /**
6002  * s2io_bist_test - invokes the MemBist test of the card .
6003  * @sp : private member of the device structure, which is a pointer to the
6004  * s2io_nic structure.
6005  * @data:variable that returns the result of each of the test conducted by
6006  * the driver.
6007  * Description:
6008  * This invokes the MemBist test of the card. We give around
6009  * 2 secs time for the Test to complete. If it's still not complete
6010  * within this peiod, we consider that the test failed.
6011  * Return value:
6012  * 0 on success and -1 on failure.
6013  */
6014 
6015 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6016 {
6017 	u8 bist = 0;
6018 	int cnt = 0, ret = -1;
6019 
6020 	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6021 	bist |= PCI_BIST_START;
	/* PCI_BIST is a single-byte register at a misaligned offset,
	 * so it must be written with a byte access.
	 */
	pci_write_config_byte(sp->pdev, PCI_BIST, bist);
6023 
6024 	while (cnt < 20) {
6025 		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6026 		if (!(bist & PCI_BIST_START)) {
6027 			*data = (bist & PCI_BIST_CODE_MASK);
6028 			ret = 0;
6029 			break;
6030 		}
6031 		msleep(100);
6032 		cnt++;
6033 	}
6034 
6035 	return ret;
6036 }
6037 
6038 /**
6039  * s2io_link_test - verifies the link state of the nic
6040  * @sp ; private member of the device structure, which is a pointer to the
6041  * s2io_nic structure.
6042  * @data: variable that returns the result of each of the test conducted by
6043  * the driver.
6044  * Description:
6045  * The function verifies the link state of the NIC and updates the input
6046  * argument 'data' appropriately.
6047  * Return value:
6048  * 0 on success.
6049  */
6050 
6051 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6052 {
6053 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6054 	u64 val64;
6055 
6056 	val64 = readq(&bar0->adapter_status);
6057 	if (!(LINK_IS_UP(val64)))
6058 		*data = 1;
6059 	else
6060 		*data = 0;
6061 
6062 	return *data;
6063 }
6064 
6065 /**
6066  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6067  * @sp: private member of the device structure, which is a pointer to the
6068  * s2io_nic structure.
6069  * @data: variable that returns the result of each of the test
6070  * conducted by the driver.
6071  * Description:
6072  *  This is one of the offline test that tests the read and write
6073  *  access to the RldRam chip on the NIC.
6074  * Return value:
6075  *  0 on success.
6076  */
6077 
6078 static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6079 {
6080 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6081 	u64 val64;
6082 	int cnt, iteration = 0, test_fail = 0;
6083 
6084 	val64 = readq(&bar0->adapter_control);
6085 	val64 &= ~ADAPTER_ECC_EN;
6086 	writeq(val64, &bar0->adapter_control);
6087 
6088 	val64 = readq(&bar0->mc_rldram_test_ctrl);
6089 	val64 |= MC_RLDRAM_TEST_MODE;
6090 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6091 
6092 	val64 = readq(&bar0->mc_rldram_mrs);
6093 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6094 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6095 
6096 	val64 |= MC_RLDRAM_MRS_ENABLE;
6097 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6098 
6099 	while (iteration < 2) {
6100 		val64 = 0x55555555aaaa0000ULL;
6101 		if (iteration == 1)
6102 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6103 		writeq(val64, &bar0->mc_rldram_test_d0);
6104 
6105 		val64 = 0xaaaa5a5555550000ULL;
6106 		if (iteration == 1)
6107 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6108 		writeq(val64, &bar0->mc_rldram_test_d1);
6109 
6110 		val64 = 0x55aaaaaaaa5a0000ULL;
6111 		if (iteration == 1)
6112 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6113 		writeq(val64, &bar0->mc_rldram_test_d2);
6114 
		val64 = 0x0000003ffffe0100ULL;
6116 		writeq(val64, &bar0->mc_rldram_test_add);
6117 
6118 		val64 = MC_RLDRAM_TEST_MODE |
6119 			MC_RLDRAM_TEST_WRITE |
6120 			MC_RLDRAM_TEST_GO;
6121 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6122 
6123 		for (cnt = 0; cnt < 5; cnt++) {
6124 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6125 			if (val64 & MC_RLDRAM_TEST_DONE)
6126 				break;
6127 			msleep(200);
6128 		}
6129 
6130 		if (cnt == 5)
6131 			break;
6132 
6133 		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6134 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6135 
6136 		for (cnt = 0; cnt < 5; cnt++) {
6137 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6138 			if (val64 & MC_RLDRAM_TEST_DONE)
6139 				break;
6140 			msleep(500);
6141 		}
6142 
6143 		if (cnt == 5)
6144 			break;
6145 
6146 		val64 = readq(&bar0->mc_rldram_test_ctrl);
6147 		if (!(val64 & MC_RLDRAM_TEST_PASS))
6148 			test_fail = 1;
6149 
6150 		iteration++;
6151 	}
6152 
6153 	*data = test_fail;
6154 
6155 	/* Bring the adapter out of test mode */
6156 	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6157 
6158 	return test_fail;
6159 }
6160 
6161 /**
6162  *  s2io_ethtool_test - conducts 6 tsets to determine the health of card.
6163  *  @sp : private member of the device structure, which is a pointer to the
6164  *  s2io_nic structure.
6165  *  @ethtest : pointer to a ethtool command specific structure that will be
6166  *  returned to the user.
6167  *  @data : variable that returns the result of each of the test
6168  * conducted by the driver.
6169  * Description:
6170  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6171  *  the health of the card.
6172  * Return value:
6173  *  void
6174  */
6175 
6176 static void s2io_ethtool_test(struct net_device *dev,
6177 			      struct ethtool_test *ethtest,
6178 			      uint64_t *data)
6179 {
6180 	struct s2io_nic *sp = netdev_priv(dev);
6181 	int orig_state = netif_running(sp->dev);
6182 
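	/*
	 * Result slots: data[0] = register test, data[1] = eeprom test,
	 * data[2] = link test, data[3] = rldram test, data[4] = bist test.
	 */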
6183 	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6184 		/* Offline Tests. */
6185 		if (orig_state)
6186 			s2io_close(sp->dev);
6187 
6188 		if (s2io_register_test(sp, &data[0]))
6189 			ethtest->flags |= ETH_TEST_FL_FAILED;
6190 
6191 		s2io_reset(sp);
6192 
6193 		if (s2io_rldram_test(sp, &data[3]))
6194 			ethtest->flags |= ETH_TEST_FL_FAILED;
6195 
6196 		s2io_reset(sp);
6197 
6198 		if (s2io_eeprom_test(sp, &data[1]))
6199 			ethtest->flags |= ETH_TEST_FL_FAILED;
6200 
6201 		if (s2io_bist_test(sp, &data[4]))
6202 			ethtest->flags |= ETH_TEST_FL_FAILED;
6203 
6204 		if (orig_state)
6205 			s2io_open(sp->dev);
6206 
6207 		data[2] = 0;
6208 	} else {
6209 		/* Online Tests. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
				  dev->name);
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
			return;
		}
6219 
6220 		if (s2io_link_test(sp, &data[2]))
6221 			ethtest->flags |= ETH_TEST_FL_FAILED;
6222 
6223 		data[0] = 0;
6224 		data[1] = 0;
6225 		data[3] = 0;
6226 		data[4] = 0;
6227 	}
6228 }
6229 
6230 static void s2io_get_ethtool_stats(struct net_device *dev,
6231 				   struct ethtool_stats *estats,
6232 				   u64 *tmp_stats)
6233 {
6234 	int i = 0, k;
6235 	struct s2io_nic *sp = netdev_priv(dev);
6236 	struct stat_block *stats = sp->mac_control.stats_info;
6237 	struct swStat *swstats = &stats->sw_stat;
6238 	struct xpakStat *xstats = &stats->xpak_stat;
6239 
6240 	s2io_updt_stats(sp);
6241 	tmp_stats[i++] =
6242 		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
6243 		le32_to_cpu(stats->tmac_frms);
6244 	tmp_stats[i++] =
6245 		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6246 		le32_to_cpu(stats->tmac_data_octets);
6247 	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6248 	tmp_stats[i++] =
6249 		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6250 		le32_to_cpu(stats->tmac_mcst_frms);
6251 	tmp_stats[i++] =
6252 		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6253 		le32_to_cpu(stats->tmac_bcst_frms);
6254 	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6255 	tmp_stats[i++] =
6256 		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6257 		le32_to_cpu(stats->tmac_ttl_octets);
6258 	tmp_stats[i++] =
6259 		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6260 		le32_to_cpu(stats->tmac_ucst_frms);
6261 	tmp_stats[i++] =
6262 		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6263 		le32_to_cpu(stats->tmac_nucst_frms);
6264 	tmp_stats[i++] =
6265 		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6266 		le32_to_cpu(stats->tmac_any_err_frms);
6267 	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6268 	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6269 	tmp_stats[i++] =
6270 		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6271 		le32_to_cpu(stats->tmac_vld_ip);
6272 	tmp_stats[i++] =
6273 		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6274 		le32_to_cpu(stats->tmac_drop_ip);
6275 	tmp_stats[i++] =
6276 		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6277 		le32_to_cpu(stats->tmac_icmp);
6278 	tmp_stats[i++] =
6279 		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6280 		le32_to_cpu(stats->tmac_rst_tcp);
6281 	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6282 	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6283 		le32_to_cpu(stats->tmac_udp);
6284 	tmp_stats[i++] =
6285 		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6286 		le32_to_cpu(stats->rmac_vld_frms);
6287 	tmp_stats[i++] =
6288 		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6289 		le32_to_cpu(stats->rmac_data_octets);
6290 	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6291 	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6292 	tmp_stats[i++] =
6293 		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6294 		le32_to_cpu(stats->rmac_vld_mcst_frms);
6295 	tmp_stats[i++] =
6296 		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6297 		le32_to_cpu(stats->rmac_vld_bcst_frms);
6298 	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6299 	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6300 	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6301 	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6302 	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6303 	tmp_stats[i++] =
6304 		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6305 		le32_to_cpu(stats->rmac_ttl_octets);
6306 	tmp_stats[i++] =
6307 		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6308 		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
6309 	tmp_stats[i++] =
6310 		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6311 		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6312 	tmp_stats[i++] =
6313 		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6314 		le32_to_cpu(stats->rmac_discarded_frms);
6315 	tmp_stats[i++] =
6316 		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6317 		<< 32 | le32_to_cpu(stats->rmac_drop_events);
6318 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6319 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6320 	tmp_stats[i++] =
6321 		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6322 		le32_to_cpu(stats->rmac_usized_frms);
6323 	tmp_stats[i++] =
6324 		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6325 		le32_to_cpu(stats->rmac_osized_frms);
6326 	tmp_stats[i++] =
6327 		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6328 		le32_to_cpu(stats->rmac_frag_frms);
6329 	tmp_stats[i++] =
6330 		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6331 		le32_to_cpu(stats->rmac_jabber_frms);
6332 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6333 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6334 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6335 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6336 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6337 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6338 	tmp_stats[i++] =
6339 		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6340 		le32_to_cpu(stats->rmac_ip);
6341 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6342 	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6343 	tmp_stats[i++] =
6344 		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6345 		le32_to_cpu(stats->rmac_drop_ip);
6346 	tmp_stats[i++] =
6347 		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6348 		le32_to_cpu(stats->rmac_icmp);
6349 	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6350 	tmp_stats[i++] =
6351 		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6352 		le32_to_cpu(stats->rmac_udp);
6353 	tmp_stats[i++] =
6354 		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6355 		le32_to_cpu(stats->rmac_err_drp_udp);
6356 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6357 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6358 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6359 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6360 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6361 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6362 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6363 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6364 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6365 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6366 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6367 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6368 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6369 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6370 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6371 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6372 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6373 	tmp_stats[i++] =
6374 		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6375 		le32_to_cpu(stats->rmac_pause_cnt);
6376 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6377 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6378 	tmp_stats[i++] =
6379 		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6380 		le32_to_cpu(stats->rmac_accepted_ip);
6381 	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6382 	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6383 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6384 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6385 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6386 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6387 	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6388 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6389 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6390 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6391 	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6392 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6393 	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6394 	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6395 	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6396 	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6397 	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6398 	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6399 	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6400 
6401 	/* Enhanced statistics exist only for Hercules */
6402 	if (sp->device_type == XFRAME_II_DEVICE) {
6403 		tmp_stats[i++] =
6404 			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6405 		tmp_stats[i++] =
6406 			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6407 		tmp_stats[i++] =
6408 			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6409 		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6410 		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6411 		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6412 		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6413 		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6414 		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6415 		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6416 		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6417 		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6418 		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6419 		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6420 		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6421 		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6422 	}
6423 
6424 	tmp_stats[i++] = 0;
6425 	tmp_stats[i++] = swstats->single_ecc_errs;
6426 	tmp_stats[i++] = swstats->double_ecc_errs;
6427 	tmp_stats[i++] = swstats->parity_err_cnt;
6428 	tmp_stats[i++] = swstats->serious_err_cnt;
6429 	tmp_stats[i++] = swstats->soft_reset_cnt;
6430 	tmp_stats[i++] = swstats->fifo_full_cnt;
6431 	for (k = 0; k < MAX_RX_RINGS; k++)
6432 		tmp_stats[i++] = swstats->ring_full_cnt[k];
6433 	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6434 	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6435 	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6436 	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6437 	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6438 	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6439 	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6440 	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6441 	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6442 	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6443 	tmp_stats[i++] = xstats->warn_laser_output_power_high;
6444 	tmp_stats[i++] = xstats->warn_laser_output_power_low;
6445 	tmp_stats[i++] = swstats->clubbed_frms_cnt;
6446 	tmp_stats[i++] = swstats->sending_both;
6447 	tmp_stats[i++] = swstats->outof_sequence_pkts;
6448 	tmp_stats[i++] = swstats->flush_max_pkts;
6449 	if (swstats->num_aggregations) {
6450 		u64 tmp = swstats->sum_avg_pkts_aggregated;
6451 		int count = 0;
6452 		/*
6453 		 * Since 64-bit divide does not work on all platforms,
6454 		 * do repeated subtraction.
6455 		 */
6456 		while (tmp >= swstats->num_aggregations) {
6457 			tmp -= swstats->num_aggregations;
6458 			count++;
6459 		}
6460 		tmp_stats[i++] = count;
	} else {
		tmp_stats[i++] = 0;
	}
6463 	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6464 	tmp_stats[i++] = swstats->pci_map_fail_cnt;
6465 	tmp_stats[i++] = swstats->watchdog_timer_cnt;
6466 	tmp_stats[i++] = swstats->mem_allocated;
6467 	tmp_stats[i++] = swstats->mem_freed;
6468 	tmp_stats[i++] = swstats->link_up_cnt;
6469 	tmp_stats[i++] = swstats->link_down_cnt;
6470 	tmp_stats[i++] = swstats->link_up_time;
6471 	tmp_stats[i++] = swstats->link_down_time;
6472 
6473 	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6474 	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6475 	tmp_stats[i++] = swstats->tx_parity_err_cnt;
6476 	tmp_stats[i++] = swstats->tx_link_loss_cnt;
6477 	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6478 
6479 	tmp_stats[i++] = swstats->rx_parity_err_cnt;
6480 	tmp_stats[i++] = swstats->rx_abort_cnt;
6481 	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6482 	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6483 	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6484 	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6485 	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6486 	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6487 	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6488 	tmp_stats[i++] = swstats->tda_err_cnt;
6489 	tmp_stats[i++] = swstats->pfc_err_cnt;
6490 	tmp_stats[i++] = swstats->pcc_err_cnt;
6491 	tmp_stats[i++] = swstats->tti_err_cnt;
6492 	tmp_stats[i++] = swstats->tpa_err_cnt;
6493 	tmp_stats[i++] = swstats->sm_err_cnt;
6494 	tmp_stats[i++] = swstats->lso_err_cnt;
6495 	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6496 	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6497 	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6498 	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6499 	tmp_stats[i++] = swstats->rc_err_cnt;
6500 	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6501 	tmp_stats[i++] = swstats->rpa_err_cnt;
6502 	tmp_stats[i++] = swstats->rda_err_cnt;
6503 	tmp_stats[i++] = swstats->rti_err_cnt;
6504 	tmp_stats[i++] = swstats->mc_err_cnt;
6505 }
6506 
6507 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6508 {
6509 	return XENA_REG_SPACE;
6510 }
6511 
static int s2io_get_eeprom_len(struct net_device *dev)
6514 {
6515 	return XENA_EEPROM_SPACE;
6516 }
6517 
6518 static int s2io_get_sset_count(struct net_device *dev, int sset)
6519 {
6520 	struct s2io_nic *sp = netdev_priv(dev);
6521 
6522 	switch (sset) {
6523 	case ETH_SS_TEST:
6524 		return S2IO_TEST_LEN;
6525 	case ETH_SS_STATS:
6526 		switch (sp->device_type) {
6527 		case XFRAME_I_DEVICE:
6528 			return XFRAME_I_STAT_LEN;
6529 		case XFRAME_II_DEVICE:
6530 			return XFRAME_II_STAT_LEN;
6531 		default:
6532 			return 0;
6533 		}
6534 	default:
6535 		return -EOPNOTSUPP;
6536 	}
6537 }
6538 
6539 static void s2io_ethtool_get_strings(struct net_device *dev,
6540 				     u32 stringset, u8 *data)
6541 {
6542 	int stat_size = 0;
6543 	struct s2io_nic *sp = netdev_priv(dev);
6544 
6545 	switch (stringset) {
6546 	case ETH_SS_TEST:
6547 		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6548 		break;
6549 	case ETH_SS_STATS:
6550 		stat_size = sizeof(ethtool_xena_stats_keys);
6551 		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6552 		if (sp->device_type == XFRAME_II_DEVICE) {
6553 			memcpy(data + stat_size,
6554 			       &ethtool_enhanced_stats_keys,
6555 			       sizeof(ethtool_enhanced_stats_keys));
6556 			stat_size += sizeof(ethtool_enhanced_stats_keys);
6557 		}
6558 
6559 		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6560 		       sizeof(ethtool_driver_stats_keys));
6561 	}
6562 }
6563 
6564 static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6565 {
6566 	struct s2io_nic *sp = netdev_priv(dev);
6567 	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6568 
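	/* Toggling LRO on a running interface requires a full card restart */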
6569 	if (changed && netif_running(dev)) {
6570 		int rc;
6571 
6572 		s2io_stop_all_tx_queue(sp);
6573 		s2io_card_down(sp);
6574 		dev->features = features;
6575 		rc = s2io_card_up(sp);
6576 		if (rc)
6577 			s2io_reset(sp);
6578 		else
6579 			s2io_start_all_tx_queue(sp);
6580 
6581 		return rc ? rc : 1;
6582 	}
6583 
6584 	return 0;
6585 }
6586 
6587 static const struct ethtool_ops netdev_ethtool_ops = {
6588 	.get_drvinfo = s2io_ethtool_gdrvinfo,
6589 	.get_regs_len = s2io_ethtool_get_regs_len,
6590 	.get_regs = s2io_ethtool_gregs,
6591 	.get_link = ethtool_op_get_link,
6592 	.get_eeprom_len = s2io_get_eeprom_len,
6593 	.get_eeprom = s2io_ethtool_geeprom,
6594 	.set_eeprom = s2io_ethtool_seeprom,
6595 	.get_ringparam = s2io_ethtool_gringparam,
6596 	.get_pauseparam = s2io_ethtool_getpause_data,
6597 	.set_pauseparam = s2io_ethtool_setpause_data,
6598 	.self_test = s2io_ethtool_test,
6599 	.get_strings = s2io_ethtool_get_strings,
6600 	.set_phys_id = s2io_ethtool_set_led,
6601 	.get_ethtool_stats = s2io_get_ethtool_stats,
6602 	.get_sset_count = s2io_get_sset_count,
6603 	.get_link_ksettings = s2io_ethtool_get_link_ksettings,
6604 	.set_link_ksettings = s2io_ethtool_set_link_ksettings,
6605 };
6606 
6607 /**
6608  *  s2io_ioctl - Entry point for the Ioctl
6609  *  @dev :  Device pointer.
6610  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6611  *  a proprietary structure used to pass information to the driver.
6612  *  @cmd :  This is used to distinguish between the different commands that
6613  *  can be passed to the IOCTL functions.
6614  *  Description:
6615  *  Currently there are no special functionality supported in IOCTL, hence
6616  *  function always return EOPNOTSUPPORTED
6617  */
6618 
6619 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6620 {
6621 	return -EOPNOTSUPP;
6622 }
6623 
6624 /**
6625  *  s2io_change_mtu - entry point to change MTU size for the device.
6626  *   @dev : device pointer.
6627  *   @new_mtu : the new MTU size for the device.
6628  *   Description: A driver entry point to change MTU size for the device.
6629  *   Before changing the MTU the device must be stopped.
6630  *  Return value:
6631  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6632  *   file on failure.
6633  */
6634 
6635 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6636 {
6637 	struct s2io_nic *sp = netdev_priv(dev);
6638 	int ret = 0;
6639 
6640 	dev->mtu = new_mtu;
6641 	if (netif_running(dev)) {
6642 		s2io_stop_all_tx_queue(sp);
6643 		s2io_card_down(sp);
6644 		ret = s2io_card_up(sp);
6645 		if (ret) {
6646 			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6647 				  __func__);
6648 			return ret;
6649 		}
6650 		s2io_wake_all_tx_queue(sp);
6651 	} else { /* Device is down */
6652 		struct XENA_dev_config __iomem *bar0 = sp->bar0;
6653 		u64 val64 = new_mtu;
6654 
6655 		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6656 	}
6657 
6658 	return ret;
6659 }
6660 
6661 /**
6662  * s2io_set_link - Set the LInk status
6663  * @data: long pointer to device private structue
6664  * Description: Sets the link status for the adapter
6665  */
6666 
6667 static void s2io_set_link(struct work_struct *work)
6668 {
6669 	struct s2io_nic *nic = container_of(work, struct s2io_nic,
6670 					    set_link_task);
6671 	struct net_device *dev = nic->dev;
6672 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
6673 	register u64 val64;
6674 	u16 subid;
6675 
6676 	rtnl_lock();
6677 
6678 	if (!netif_running(dev))
6679 		goto out_unlock;
6680 
6681 	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6682 		/* The card is being reset, no point doing anything */
6683 		goto out_unlock;
6684 	}
6685 
6686 	subid = nic->pdev->subsystem_device;
6687 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6688 		/*
6689 		 * Allow a small delay for the NICs self initiated
6690 		 * cleanup to complete.
6691 		 */
6692 		msleep(100);
6693 	}
6694 
6695 	val64 = readq(&bar0->adapter_status);
6696 	if (LINK_IS_UP(val64)) {
6697 		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6698 			if (verify_xena_quiescence(nic)) {
6699 				val64 = readq(&bar0->adapter_control);
6700 				val64 |= ADAPTER_CNTL_EN;
6701 				writeq(val64, &bar0->adapter_control);
6702 				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6703 					    nic->device_type, subid)) {
6704 					val64 = readq(&bar0->gpio_control);
6705 					val64 |= GPIO_CTRL_GPIO_0;
6706 					writeq(val64, &bar0->gpio_control);
6707 					val64 = readq(&bar0->gpio_control);
6708 				} else {
6709 					val64 |= ADAPTER_LED_ON;
6710 					writeq(val64, &bar0->adapter_control);
6711 				}
6712 				nic->device_enabled_once = true;
6713 			} else {
6714 				DBG_PRINT(ERR_DBG,
6715 					  "%s: Error: device is not Quiescent\n",
6716 					  dev->name);
6717 				s2io_stop_all_tx_queue(nic);
6718 			}
6719 		}
6720 		val64 = readq(&bar0->adapter_control);
6721 		val64 |= ADAPTER_LED_ON;
6722 		writeq(val64, &bar0->adapter_control);
6723 		s2io_link(nic, LINK_UP);
6724 	} else {
6725 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6726 						      subid)) {
6727 			val64 = readq(&bar0->gpio_control);
6728 			val64 &= ~GPIO_CTRL_GPIO_0;
6729 			writeq(val64, &bar0->gpio_control);
6730 			val64 = readq(&bar0->gpio_control);
6731 		}
6732 		/* turn off LED */
6733 		val64 = readq(&bar0->adapter_control);
6734 		val64 = val64 & (~ADAPTER_LED_ON);
6735 		writeq(val64, &bar0->adapter_control);
6736 		s2io_link(nic, LINK_DOWN);
6737 	}
6738 	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6739 
6740 out_unlock:
6741 	rtnl_unlock();
6742 }
6743 
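/*
 * Re-attach receive buffers to a descriptor whose ownership bit is being
 * reset.  If an skb is already attached, the previously mapped DMA
 * addresses (temp0..temp2) are simply reused; otherwise a fresh skb is
 * allocated and mapped.
 */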
6744 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6745 				  struct buffAdd *ba,
6746 				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
6747 				  u64 *temp2, int size)
6748 {
6749 	struct net_device *dev = sp->dev;
6750 	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6751 
6752 	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6753 		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6754 		/* allocate skb */
6755 		if (*skb) {
6756 			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6757 			/*
6758 			 * As Rx frame are not going to be processed,
6759 			 * using same mapped address for the Rxd
6760 			 * buffer pointer
6761 			 */
6762 			rxdp1->Buffer0_ptr = *temp0;
6763 		} else {
6764 			*skb = netdev_alloc_skb(dev, size);
6765 			if (!(*skb)) {
6766 				DBG_PRINT(INFO_DBG,
6767 					  "%s: Out of memory to allocate %s\n",
6768 					  dev->name, "1 buf mode SKBs");
6769 				stats->mem_alloc_fail_cnt++;
				return -ENOMEM;
6771 			}
6772 			stats->mem_allocated += (*skb)->truesize;
			/* Store the mapped address in a temp variable
			 * so that it can be reused for the next RxD whose
			 * Host_Control is NULL.
			 */
6777 			rxdp1->Buffer0_ptr = *temp0 =
6778 				pci_map_single(sp->pdev, (*skb)->data,
6779 					       size - NET_IP_ALIGN,
6780 					       PCI_DMA_FROMDEVICE);
6781 			if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6782 				goto memalloc_failed;
6783 			rxdp->Host_Control = (unsigned long) (*skb);
6784 		}
6785 	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6786 		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6787 		/* Two buffer Mode */
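		/*
		 * In 2-buffer mode Buffer0 receives the Ethernet header
		 * (ba_0, BUF0_LEN bytes), Buffer1 (ba_1) is a dummy
		 * buffer, and Buffer2 receives the payload of up to
		 * dev->mtu + 4 bytes (the extra 4 bytes presumably
		 * cover the trailing FCS).
		 */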
6788 		if (*skb) {
6789 			rxdp3->Buffer2_ptr = *temp2;
6790 			rxdp3->Buffer0_ptr = *temp0;
6791 			rxdp3->Buffer1_ptr = *temp1;
6792 		} else {
6793 			*skb = netdev_alloc_skb(dev, size);
6794 			if (!(*skb)) {
6795 				DBG_PRINT(INFO_DBG,
6796 					  "%s: Out of memory to allocate %s\n",
6797 					  dev->name,
6798 					  "2 buf mode SKBs");
6799 				stats->mem_alloc_fail_cnt++;
6800 				return -ENOMEM;
6801 			}
6802 			stats->mem_allocated += (*skb)->truesize;
6803 			rxdp3->Buffer2_ptr = *temp2 =
6804 				pci_map_single(sp->pdev, (*skb)->data,
6805 					       dev->mtu + 4,
6806 					       PCI_DMA_FROMDEVICE);
6807 			if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6808 				goto memalloc_failed;
6809 			rxdp3->Buffer0_ptr = *temp0 =
6810 				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6811 					       PCI_DMA_FROMDEVICE);
6812 			if (pci_dma_mapping_error(sp->pdev,
6813 						  rxdp3->Buffer0_ptr)) {
6814 				pci_unmap_single(sp->pdev,
6815 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6816 						 dev->mtu + 4,
6817 						 PCI_DMA_FROMDEVICE);
6818 				goto memalloc_failed;
6819 			}
6820 			rxdp->Host_Control = (unsigned long) (*skb);
6821 
			/* Buffer-1 is a dummy buffer that is not used */
6823 			rxdp3->Buffer1_ptr = *temp1 =
6824 				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6825 					       PCI_DMA_FROMDEVICE);
6826 			if (pci_dma_mapping_error(sp->pdev,
6827 						  rxdp3->Buffer1_ptr)) {
6828 				pci_unmap_single(sp->pdev,
6829 						 (dma_addr_t)rxdp3->Buffer0_ptr,
6830 						 BUF0_LEN, PCI_DMA_FROMDEVICE);
6831 				pci_unmap_single(sp->pdev,
6832 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6833 						 dev->mtu + 4,
6834 						 PCI_DMA_FROMDEVICE);
6835 				goto memalloc_failed;
6836 			}
6837 		}
6838 	}
6839 	return 0;
6840 
6841 memalloc_failed:
6842 	stats->pci_map_fail_cnt++;
6843 	stats->mem_freed += (*skb)->truesize;
6844 	dev_kfree_skb(*skb);
6845 	return -ENOMEM;
6846 }
6847 
6848 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6849 				int size)
6850 {
6851 	struct net_device *dev = sp->dev;
6852 	if (sp->rxd_mode == RXD_MODE_1) {
6853 		rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6854 	} else if (sp->rxd_mode == RXD_MODE_3B) {
6855 		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6856 		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6857 		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6858 	}
6859 }
6860 
static int rxd_owner_bit_reset(struct s2io_nic *sp)
6862 {
6863 	int i, j, k, blk_cnt = 0, size;
6864 	struct config_param *config = &sp->config;
6865 	struct mac_info *mac_control = &sp->mac_control;
6866 	struct net_device *dev = sp->dev;
6867 	struct RxD_t *rxdp = NULL;
6868 	struct sk_buff *skb = NULL;
6869 	struct buffAdd *ba = NULL;
6870 	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6871 
6872 	/* Calculate the size based on ring mode */
6873 	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6874 		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6875 	if (sp->rxd_mode == RXD_MODE_1)
6876 		size += NET_IP_ALIGN;
6877 	else if (sp->rxd_mode == RXD_MODE_3B)
6878 		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6879 
6880 	for (i = 0; i < config->rx_ring_num; i++) {
6881 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6882 		struct ring_info *ring = &mac_control->rings[i];
6883 
6884 		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6885 
6886 		for (j = 0; j < blk_cnt; j++) {
6887 			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6888 				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6889 				if (sp->rxd_mode == RXD_MODE_3B)
6890 					ba = &ring->ba[j][k];
6891 				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6892 							   &temp0_64,
6893 							   &temp1_64,
6894 							   &temp2_64,
6895 							   size) == -ENOMEM) {
6896 					return 0;
6897 				}
6898 
6899 				set_rxd_buffer_size(sp, rxdp, size);
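				/*
				 * Ensure the buffer pointers and sizes
				 * written above are visible to the device
				 * before the ownership bit hands the
				 * descriptor back to the hardware.
				 */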
6900 				dma_wmb();
6901 				/* flip the Ownership bit to Hardware */
6902 				rxdp->Control_1 |= RXD_OWN_XENA;
6903 			}
6904 		}
6905 	}
6906 	return 0;
6907 
6908 }
6909 
6910 static int s2io_add_isr(struct s2io_nic *sp)
6911 {
6912 	int ret = 0;
6913 	struct net_device *dev = sp->dev;
6914 	int err = 0;
6915 
6916 	if (sp->config.intr_type == MSI_X)
6917 		ret = s2io_enable_msi_x(sp);
6918 	if (ret) {
6919 		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6920 		sp->config.intr_type = INTA;
6921 	}
6922 
6923 	/*
6924 	 * Store the values of the MSIX table in
6925 	 * the struct s2io_nic structure
6926 	 */
6927 	store_xmsi_data(sp);
6928 
6929 	/* After proper initialization of H/W, register ISR */
6930 	if (sp->config.intr_type == MSI_X) {
6931 		int i, msix_rx_cnt = 0;
6932 
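		/*
		 * One vector is registered per Rx ring; the remaining
		 * (alarm) vector services Tx completions and alarms,
		 * hence the "-TX" suffix in its name below.
		 */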
6933 		for (i = 0; i < sp->num_entries; i++) {
6934 			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6935 				if (sp->s2io_entries[i].type ==
6936 				    MSIX_RING_TYPE) {
6937 					snprintf(sp->desc[i],
6938 						sizeof(sp->desc[i]),
6939 						"%s:MSI-X-%d-RX",
6940 						dev->name, i);
6941 					err = request_irq(sp->entries[i].vector,
6942 							  s2io_msix_ring_handle,
6943 							  0,
6944 							  sp->desc[i],
6945 							  sp->s2io_entries[i].arg);
6946 				} else if (sp->s2io_entries[i].type ==
6947 					   MSIX_ALARM_TYPE) {
6948 					snprintf(sp->desc[i],
6949 						sizeof(sp->desc[i]),
6950 						"%s:MSI-X-%d-TX",
6951 						dev->name, i);
6952 					err = request_irq(sp->entries[i].vector,
6953 							  s2io_msix_fifo_handle,
6954 							  0,
6955 							  sp->desc[i],
6956 							  sp->s2io_entries[i].arg);
6957 
6958 				}
				/* If either data or addr is zero, print it. */
6960 				if (!(sp->msix_info[i].addr &&
6961 				      sp->msix_info[i].data)) {
6962 					DBG_PRINT(ERR_DBG,
6963 						  "%s @Addr:0x%llx Data:0x%llx\n",
6964 						  sp->desc[i],
6965 						  (unsigned long long)
6966 						  sp->msix_info[i].addr,
6967 						  (unsigned long long)
6968 						  ntohl(sp->msix_info[i].data));
6969 				} else
6970 					msix_rx_cnt++;
6971 				if (err) {
6972 					remove_msix_isr(sp);
6973 
6974 					DBG_PRINT(ERR_DBG,
6975 						  "%s:MSI-X-%d registration "
6976 						  "failed\n", dev->name, i);
6977 
6978 					DBG_PRINT(ERR_DBG,
6979 						  "%s: Defaulting to INTA\n",
6980 						  dev->name);
6981 					sp->config.intr_type = INTA;
6982 					break;
6983 				}
6984 				sp->s2io_entries[i].in_use =
6985 					MSIX_REGISTERED_SUCCESS;
6986 			}
6987 		}
6988 		if (!err) {
6989 			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
6990 			DBG_PRINT(INFO_DBG,
6991 				  "MSI-X-TX entries enabled through alarm vector\n");
6992 		}
6993 	}
6994 	if (sp->config.intr_type == INTA) {
6995 		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
6996 				  sp->name, dev);
6997 		if (err) {
6998 			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6999 				  dev->name);
7000 			return -1;
7001 		}
7002 	}
7003 	return 0;
7004 }
7005 
7006 static void s2io_rem_isr(struct s2io_nic *sp)
7007 {
7008 	if (sp->config.intr_type == MSI_X)
7009 		remove_msix_isr(sp);
7010 	else
7011 		remove_inta_isr(sp);
7012 }
7013 
7014 static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7015 {
7016 	int cnt = 0;
7017 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
7018 	register u64 val64 = 0;
	struct config_param *config = &sp->config;
7021 
7022 	if (!is_s2io_card_up(sp))
7023 		return;
7024 
7025 	del_timer_sync(&sp->alarm_timer);
7026 	/* If s2io_set_link task is executing, wait till it completes. */
7027 	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7028 		msleep(50);
7029 	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7030 
7031 	/* Disable napi */
7032 	if (sp->config.napi) {
		int off = 0;

		if (config->intr_type == MSI_X) {
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
		} else {
			napi_disable(&sp->napi);
		}
7040 	}
7041 
7042 	/* disable Tx and Rx traffic on the NIC */
7043 	if (do_io)
7044 		stop_nic(sp);
7045 
7046 	s2io_rem_isr(sp);
7047 
7048 	/* stop the tx queue, indicate link down */
7049 	s2io_link(sp, LINK_DOWN);
7050 
7051 	/* Check if the device is Quiescent and then Reset the NIC */
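	/* Poll for up to ~500ms (10 iterations x 50ms) before giving up. */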
7052 	while (do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffers to avoid a ring bump. Since there is
		 * no intention of processing the Rx frames at this point,
		 * we just set the ownership bit of the RxDs in each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode.
		 */
7060 		rxd_owner_bit_reset(sp);
7061 
7062 		val64 = readq(&bar0->adapter_status);
7063 		if (verify_xena_quiescence(sp)) {
7064 			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7065 				break;
7066 		}
7067 
7068 		msleep(50);
7069 		cnt++;
7070 		if (cnt == 10) {
7071 			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7072 				  "adapter status reads 0x%llx\n",
7073 				  (unsigned long long)val64);
7074 			break;
7075 		}
7076 	}
7077 	if (do_io)
7078 		s2io_reset(sp);
7079 
7080 	/* Free all Tx buffers */
7081 	free_tx_buffers(sp);
7082 
7083 	/* Free all Rx buffers */
7084 	free_rx_buffers(sp);
7085 
7086 	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7087 }
7088 
7089 static void s2io_card_down(struct s2io_nic *sp)
7090 {
7091 	do_s2io_card_down(sp, 1);
7092 }
7093 
7094 static int s2io_card_up(struct s2io_nic *sp)
7095 {
7096 	int i, ret = 0;
7097 	struct config_param *config;
7098 	struct mac_info *mac_control;
7099 	struct net_device *dev = sp->dev;
7100 	u16 interruptible;
7101 
7102 	/* Initialize the H/W I/O registers */
7103 	ret = init_nic(sp);
7104 	if (ret != 0) {
7105 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7106 			  dev->name);
7107 		if (ret != -EIO)
7108 			s2io_reset(sp);
7109 		return ret;
7110 	}
7111 
	/*
	 * Initialize the Rx buffers by filling each configured
	 * Rx ring with receive blocks.
	 */
7116 	config = &sp->config;
7117 	mac_control = &sp->mac_control;
7118 
7119 	for (i = 0; i < config->rx_ring_num; i++) {
7120 		struct ring_info *ring = &mac_control->rings[i];
7121 
7122 		ring->mtu = dev->mtu;
7123 		ring->lro = !!(dev->features & NETIF_F_LRO);
7124 		ret = fill_rx_buffers(sp, ring, 1);
7125 		if (ret) {
7126 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7127 				  dev->name);
7128 			s2io_reset(sp);
7129 			free_rx_buffers(sp);
7130 			return -ENOMEM;
7131 		}
7132 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7133 			  ring->rx_bufs_left);
7134 	}
7135 
7136 	/* Initialise napi */
7137 	if (config->napi) {
7138 		if (config->intr_type ==  MSI_X) {
7139 			for (i = 0; i < sp->config.rx_ring_num; i++)
7140 				napi_enable(&sp->mac_control.rings[i].napi);
7141 		} else {
7142 			napi_enable(&sp->napi);
7143 		}
7144 	}
7145 
7146 	/* Maintain the state prior to the open */
7147 	if (sp->promisc_flg)
7148 		sp->promisc_flg = 0;
7149 	if (sp->m_cast_flg) {
7150 		sp->m_cast_flg = 0;
7151 		sp->all_multi_pos = 0;
7152 	}
7153 
7154 	/* Setting its receive mode */
7155 	s2io_set_multicast(dev);
7156 
7157 	if (dev->features & NETIF_F_LRO) {
7158 		/* Initialize max aggregatable pkts per session based on MTU */
7159 		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
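		/*
		 * The 16-bit IP total-length field caps an aggregated
		 * frame at 65535 bytes, so dividing by the MTU yields
		 * the number of MTU-sized segments one session can hold.
		 */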
7160 		/* Check if we can use (if specified) user provided value */
7161 		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7162 			sp->lro_max_aggr_per_sess = lro_max_pkts;
7163 	}
7164 
7165 	/* Enable Rx Traffic and interrupts on the NIC */
7166 	if (start_nic(sp)) {
7167 		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7168 		s2io_reset(sp);
7169 		free_rx_buffers(sp);
7170 		return -ENODEV;
7171 	}
7172 
7173 	/* Add interrupt service routine */
7174 	if (s2io_add_isr(sp) != 0) {
7175 		if (sp->config.intr_type == MSI_X)
7176 			s2io_rem_isr(sp);
7177 		s2io_reset(sp);
7178 		free_rx_buffers(sp);
7179 		return -ENODEV;
7180 	}
7181 
7182 	timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
7183 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
7184 
7185 	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7186 
7187 	/*  Enable select interrupts */
7188 	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7189 	if (sp->config.intr_type != INTA) {
7190 		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7191 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7192 	} else {
7193 		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7194 		interruptible |= TX_PIC_INTR;
7195 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7196 	}
7197 
7198 	return 0;
7199 }
7200 
/**
 * s2io_restart_nic - Resets the NIC.
 * @work: work_struct embedded in the s2io_nic structure
 * Description:
 * This function is scheduled to be run by the s2io_tx_watchdog
 * function after 0.5 secs to reset the NIC. The idea is to reduce
 * the run time of the watchdog routine which is run holding a
 * spin lock.
 */
7210 
7211 static void s2io_restart_nic(struct work_struct *work)
7212 {
7213 	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7214 	struct net_device *dev = sp->dev;
7215 
7216 	rtnl_lock();
7217 
7218 	if (!netif_running(dev))
7219 		goto out_unlock;
7220 
7221 	s2io_card_down(sp);
7222 	if (s2io_card_up(sp)) {
7223 		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7224 	}
7225 	s2io_wake_all_tx_queue(sp);
7226 	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7227 out_unlock:
7228 	rtnl_unlock();
7229 }
7230 
7231 /**
7232  *  s2io_tx_watchdog - Watchdog for transmit side.
7233  *  @dev : Pointer to net device structure
7234  *  Description:
7235  *  This function is triggered if the Tx Queue is stopped
7236  *  for a pre-defined amount of time when the Interface is still up.
7237  *  If the Interface is jammed in such a situation, the hardware is
7238  *  reset (by s2io_close) and restarted again (by s2io_open) to
7239  *  overcome any problem that might have been caused in the hardware.
7240  *  Return value:
7241  *  void
7242  */
7243 
7244 static void s2io_tx_watchdog(struct net_device *dev)
7245 {
7246 	struct s2io_nic *sp = netdev_priv(dev);
7247 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7248 
7249 	if (netif_carrier_ok(dev)) {
7250 		swstats->watchdog_timer_cnt++;
7251 		schedule_work(&sp->rst_timer_task);
7252 		swstats->soft_reset_cnt++;
7253 	}
7254 }
7255 
/**
 *   rx_osm_handler - To perform some OS related operations on the SKB.
 *   @ring_data: pointer to the ring_info structure of the ring from which
 *   this RxD was extracted.
 *   @rxdp: pointer to the RxD descriptor being processed.
 *   Description:
 *   This function is called by the Rx interrupt service routine to perform
 *   some OS related operations on the SKB before passing it to the upper
 *   layers. It mainly checks if the checksum is OK and, if so, marks the
 *   SKB's checksum as verified, increments the Rx packet count and passes
 *   the SKB to the upper layer. If the checksum is wrong, it increments
 *   the Rx packet error count, frees the SKB and returns an error.
 *   Return value:
 *   SUCCESS on success and -1 on failure.
 */
7273 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7274 {
7275 	struct s2io_nic *sp = ring_data->nic;
7276 	struct net_device *dev = ring_data->dev;
7277 	struct sk_buff *skb = (struct sk_buff *)
7278 		((unsigned long)rxdp->Host_Control);
7279 	int ring_no = ring_data->ring_no;
7280 	u16 l3_csum, l4_csum;
7281 	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7282 	struct lro *uninitialized_var(lro);
7283 	u8 err_mask;
7284 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7285 
7286 	skb->dev = dev;
7287 
7288 	if (err) {
7289 		/* Check for parity error */
7290 		if (err & 0x1)
7291 			swstats->parity_err_cnt++;
7292 
7293 		err_mask = err >> 48;
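		/*
		 * The 4-bit transfer code (RXD_T_CODE) occupies bits
		 * 48-51 of the 64-bit Control_1 word, so shifting right
		 * by 48 isolates it for the switch below.
		 */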
7294 		switch (err_mask) {
7295 		case 1:
7296 			swstats->rx_parity_err_cnt++;
7297 			break;
7298 
7299 		case 2:
7300 			swstats->rx_abort_cnt++;
7301 			break;
7302 
7303 		case 3:
7304 			swstats->rx_parity_abort_cnt++;
7305 			break;
7306 
7307 		case 4:
7308 			swstats->rx_rda_fail_cnt++;
7309 			break;
7310 
7311 		case 5:
7312 			swstats->rx_unkn_prot_cnt++;
7313 			break;
7314 
7315 		case 6:
7316 			swstats->rx_fcs_err_cnt++;
7317 			break;
7318 
7319 		case 7:
7320 			swstats->rx_buf_size_err_cnt++;
7321 			break;
7322 
7323 		case 8:
7324 			swstats->rx_rxd_corrupt_cnt++;
7325 			break;
7326 
7327 		case 15:
7328 			swstats->rx_unkn_err_cnt++;
7329 			break;
7330 		}
		/*
		 * Drop the packet if the transfer code is bad. The exception
		 * is 0x5, which could be due to an unsupported IPv6 extension
		 * header; in that case we let the stack handle the packet.
		 * Since the checksum will be incorrect, the stack will
		 * validate it itself.
		 */
7338 		if (err_mask != 0x5) {
7339 			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7340 				  dev->name, err_mask);
7341 			dev->stats.rx_crc_errors++;
7342 			swstats->mem_freed
7343 				+= skb->truesize;
7344 			dev_kfree_skb(skb);
7345 			ring_data->rx_bufs_left -= 1;
7346 			rxdp->Host_Control = 0;
7347 			return 0;
7348 		}
7349 	}
7350 
7351 	rxdp->Host_Control = 0;
7352 	if (sp->rxd_mode == RXD_MODE_1) {
7353 		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7354 
7355 		skb_put(skb, len);
7356 	} else if (sp->rxd_mode == RXD_MODE_3B) {
7357 		int get_block = ring_data->rx_curr_get_info.block_index;
7358 		int get_off = ring_data->rx_curr_get_info.offset;
7359 		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7360 		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7361 		unsigned char *buff = skb_push(skb, buf0_len);
7362 
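		/*
		 * Reassemble the frame: copy the header that the NIC
		 * DMAed into the separate ba_0 buffer in front of the
		 * payload, then extend the skb to cover the payload
		 * that already sits in its data area.
		 */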
7363 		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7364 		memcpy(buff, ba->ba_0, buf0_len);
7365 		skb_put(skb, buf2_len);
7366 	}
7367 
7368 	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7369 	    ((!ring_data->lro) ||
7370 	     (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
7371 	    (dev->features & NETIF_F_RXCSUM)) {
7372 		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7373 		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7374 		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7375 			/*
7376 			 * NIC verifies if the Checksum of the received
7377 			 * frame is Ok or not and accordingly returns
7378 			 * a flag in the RxD.
7379 			 */
7380 			skb->ip_summed = CHECKSUM_UNNECESSARY;
7381 			if (ring_data->lro) {
7382 				u32 tcp_len = 0;
7383 				u8 *tcp;
7384 				int ret = 0;
7385 
7386 				ret = s2io_club_tcp_session(ring_data,
7387 							    skb->data, &tcp,
7388 							    &tcp_len, &lro,
7389 							    rxdp, sp);
7390 				switch (ret) {
7391 				case 3: /* Begin anew */
7392 					lro->parent = skb;
7393 					goto aggregate;
7394 				case 1: /* Aggregate */
7395 					lro_append_pkt(sp, lro, skb, tcp_len);
7396 					goto aggregate;
7397 				case 4: /* Flush session */
7398 					lro_append_pkt(sp, lro, skb, tcp_len);
7399 					queue_rx_frame(lro->parent,
7400 						       lro->vlan_tag);
7401 					clear_lro_session(lro);
7402 					swstats->flush_max_pkts++;
7403 					goto aggregate;
7404 				case 2: /* Flush both */
7405 					lro->parent->data_len = lro->frags_len;
7406 					swstats->sending_both++;
7407 					queue_rx_frame(lro->parent,
7408 						       lro->vlan_tag);
7409 					clear_lro_session(lro);
7410 					goto send_up;
7411 				case 0: /* sessions exceeded */
7412 				case -1: /* non-TCP or not L2 aggregatable */
7413 				case 5: /*
7414 					 * First pkt in session not
7415 					 * L3/L4 aggregatable
7416 					 */
7417 					break;
7418 				default:
7419 					DBG_PRINT(ERR_DBG,
7420 						  "%s: Samadhana!!\n",
7421 						  __func__);
7422 					BUG();
7423 				}
7424 			}
7425 		} else {
7426 			/*
7427 			 * Packet with erroneous checksum, let the
7428 			 * upper layers deal with it.
7429 			 */
7430 			skb_checksum_none_assert(skb);
7431 		}
7432 	} else
7433 		skb_checksum_none_assert(skb);
7434 
7435 	swstats->mem_freed += skb->truesize;
7436 send_up:
7437 	skb_record_rx_queue(skb, ring_no);
7438 	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7439 aggregate:
7440 	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7441 	return SUCCESS;
7442 }
7443 
/**
 *  s2io_link - stops/starts the Tx queue.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  @link : indicates whether the link is UP/DOWN.
 *  Description:
 *  This function stops/starts the Tx queue depending on whether the link
 *  status of the NIC is down or up. This is called by the Alarm
 *  interrupt handler whenever a link change interrupt comes up.
 *  Return value:
 *  void.
 */
7456 
7457 static void s2io_link(struct s2io_nic *sp, int link)
7458 {
7459 	struct net_device *dev = sp->dev;
7460 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7461 
7462 	if (link != sp->last_link_state) {
7463 		init_tti(sp, link);
7464 		if (link == LINK_DOWN) {
7465 			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7466 			s2io_stop_all_tx_queue(sp);
7467 			netif_carrier_off(dev);
7468 			if (swstats->link_up_cnt)
7469 				swstats->link_up_time =
7470 					jiffies - sp->start_time;
7471 			swstats->link_down_cnt++;
7472 		} else {
7473 			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7474 			if (swstats->link_down_cnt)
7475 				swstats->link_down_time =
7476 					jiffies - sp->start_time;
7477 			swstats->link_up_cnt++;
7478 			netif_carrier_on(dev);
7479 			s2io_wake_all_tx_queue(sp);
7480 		}
7481 	}
7482 	sp->last_link_state = link;
7483 	sp->start_time = jiffies;
7484 }
7485 
/**
 *  s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  Description:
 *  This function initializes a few of the PCI and PCI-X configuration
 *  registers with recommended values.
 *  Return value:
 *  void
 */
7496 
7497 static void s2io_init_pci(struct s2io_nic *sp)
7498 {
7499 	u16 pci_cmd = 0, pcix_cmd = 0;
7500 
7501 	/* Enable Data Parity Error Recovery in PCI-X command register. */
7502 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7503 			     &(pcix_cmd));
7504 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7505 			      (pcix_cmd | 1));
7506 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7507 			     &(pcix_cmd));
7508 
7509 	/* Set the PErr Response bit in PCI command register. */
7510 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7511 	pci_write_config_word(sp->pdev, PCI_COMMAND,
7512 			      (pci_cmd | PCI_COMMAND_PARITY));
7513 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7514 }
7515 
7516 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7517 			    u8 *dev_multiq)
7518 {
7519 	int i;
7520 
7521 	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7522 		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7523 			  "(%d) not supported\n", tx_fifo_num);
7524 
7525 		if (tx_fifo_num < 1)
7526 			tx_fifo_num = 1;
7527 		else
7528 			tx_fifo_num = MAX_TX_FIFOS;
7529 
7530 		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7531 	}
7532 
7533 	if (multiq)
7534 		*dev_multiq = multiq;
7535 
7536 	if (tx_steering_type && (1 == tx_fifo_num)) {
7537 		if (tx_steering_type != TX_DEFAULT_STEERING)
7538 			DBG_PRINT(ERR_DBG,
7539 				  "Tx steering is not supported with "
7540 				  "one fifo. Disabling Tx steering.\n");
7541 		tx_steering_type = NO_STEERING;
7542 	}
7543 
7544 	if ((tx_steering_type < NO_STEERING) ||
7545 	    (tx_steering_type > TX_DEFAULT_STEERING)) {
7546 		DBG_PRINT(ERR_DBG,
7547 			  "Requested transmit steering not supported\n");
7548 		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7549 		tx_steering_type = NO_STEERING;
7550 	}
7551 
7552 	if (rx_ring_num > MAX_RX_RINGS) {
7553 		DBG_PRINT(ERR_DBG,
7554 			  "Requested number of rx rings not supported\n");
7555 		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7556 			  MAX_RX_RINGS);
7557 		rx_ring_num = MAX_RX_RINGS;
7558 	}
7559 
7560 	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7561 		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7562 			  "Defaulting to INTA\n");
7563 		*dev_intr_type = INTA;
7564 	}
7565 
7566 	if ((*dev_intr_type == MSI_X) &&
7567 	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7568 	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7569 		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7570 			  "Defaulting to INTA\n");
7571 		*dev_intr_type = INTA;
7572 	}
7573 
7574 	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7575 		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7576 		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7577 		rx_ring_mode = 1;
7578 	}
7579 
7580 	for (i = 0; i < MAX_RX_RINGS; i++)
7581 		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7582 			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7583 				  "supported\nDefaulting to %d\n",
7584 				  MAX_RX_BLOCKS_PER_RING);
7585 			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7586 		}
7587 
7588 	return SUCCESS;
7589 }
7590 
/**
 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
 * or Traffic class respectively.
 * @nic: device private variable
 * @ds_codepoint: DS codepoint (TOS/Traffic class) to match on
 * @ring: receive ring to which the matching traffic is steered
 * Description: The function configures the receive steering to
 * the desired receive ring.
 * Return Value:  SUCCESS on success and FAILURE on an out-of-range
 * codepoint or command timeout.
 */
7600 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7601 {
7602 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
7603 	register u64 val64 = 0;
7604 
7605 	if (ds_codepoint > 63)
7606 		return FAILURE;
7607 
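	/*
	 * Steering entries are programmed indirectly: write the target
	 * ring into the data register, then issue a write-enable strobe
	 * with the DS codepoint as the table offset, and finally poll
	 * the strobe bit until the command completes.
	 */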
7608 	val64 = RTS_DS_MEM_DATA(ring);
7609 	writeq(val64, &bar0->rts_ds_mem_data);
7610 
7611 	val64 = RTS_DS_MEM_CTRL_WE |
7612 		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7613 		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7614 
7615 	writeq(val64, &bar0->rts_ds_mem_ctrl);
7616 
7617 	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7618 				     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7619 				     S2IO_BIT_RESET);
7620 }
7621 
7622 static const struct net_device_ops s2io_netdev_ops = {
7623 	.ndo_open	        = s2io_open,
7624 	.ndo_stop	        = s2io_close,
7625 	.ndo_get_stats	        = s2io_get_stats,
7626 	.ndo_start_xmit    	= s2io_xmit,
7627 	.ndo_validate_addr	= eth_validate_addr,
7628 	.ndo_set_rx_mode	= s2io_set_multicast,
7629 	.ndo_do_ioctl	   	= s2io_ioctl,
7630 	.ndo_set_mac_address    = s2io_set_mac_addr,
7631 	.ndo_change_mtu	   	= s2io_change_mtu,
7632 	.ndo_set_features	= s2io_set_features,
7633 	.ndo_tx_timeout	   	= s2io_tx_watchdog,
7634 #ifdef CONFIG_NET_POLL_CONTROLLER
7635 	.ndo_poll_controller    = s2io_netpoll,
7636 #endif
7637 };
7638 
/**
 *  s2io_init_nic - Initialization of the adapter.
 *  @pdev : structure containing the PCI related information of the device.
 *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
 *  All OS related initialization, including memory and device structure
 *  setup and initialization of the device private variable, is done here.
 *  The swapper control register is also initialized to enable read and
 *  write into the I/O registers of the device.
 *  Return value:
 *  returns 0 on success and negative on failure.
 */
7652 
7653 static int
7654 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7655 {
7656 	struct s2io_nic *sp;
7657 	struct net_device *dev;
7658 	int i, j, ret;
7659 	int dma_flag = false;
7660 	u32 mac_up, mac_down;
7661 	u64 val64 = 0, tmp64 = 0;
7662 	struct XENA_dev_config __iomem *bar0 = NULL;
7663 	u16 subid;
7664 	struct config_param *config;
7665 	struct mac_info *mac_control;
7666 	int mode;
7667 	u8 dev_intr_type = intr_type;
7668 	u8 dev_multiq = 0;
7669 
7670 	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7671 	if (ret)
7672 		return ret;
7673 
7674 	ret = pci_enable_device(pdev);
7675 	if (ret) {
7676 		DBG_PRINT(ERR_DBG,
7677 			  "%s: pci_enable_device failed\n", __func__);
7678 		return ret;
7679 	}
7680 
7681 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7682 		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7683 		dma_flag = true;
7684 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
7685 			DBG_PRINT(ERR_DBG,
7686 				  "Unable to obtain 64bit DMA "
7687 				  "for consistent allocations\n");
7688 			pci_disable_device(pdev);
7689 			return -ENOMEM;
7690 		}
7691 	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
7692 		DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7693 	} else {
7694 		pci_disable_device(pdev);
7695 		return -ENOMEM;
7696 	}
7697 	ret = pci_request_regions(pdev, s2io_driver_name);
7698 	if (ret) {
7699 		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7700 			  __func__, ret);
7701 		pci_disable_device(pdev);
7702 		return -ENODEV;
7703 	}
7704 	if (dev_multiq)
7705 		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7706 	else
7707 		dev = alloc_etherdev(sizeof(struct s2io_nic));
7708 	if (dev == NULL) {
7709 		pci_disable_device(pdev);
7710 		pci_release_regions(pdev);
7711 		return -ENODEV;
7712 	}
7713 
7714 	pci_set_master(pdev);
7715 	pci_set_drvdata(pdev, dev);
7716 	SET_NETDEV_DEV(dev, &pdev->dev);
7717 
7718 	/*  Private member variable initialized to s2io NIC structure */
7719 	sp = netdev_priv(dev);
7720 	sp->dev = dev;
7721 	sp->pdev = pdev;
7722 	sp->high_dma_flag = dma_flag;
7723 	sp->device_enabled_once = false;
7724 	if (rx_ring_mode == 1)
7725 		sp->rxd_mode = RXD_MODE_1;
7726 	if (rx_ring_mode == 2)
7727 		sp->rxd_mode = RXD_MODE_3B;
7728 
7729 	sp->config.intr_type = dev_intr_type;
7730 
7731 	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7732 	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7733 		sp->device_type = XFRAME_II_DEVICE;
7734 	else
7735 		sp->device_type = XFRAME_I_DEVICE;
7736 
	/* Initialize some PCI/PCI-X fields of the NIC. */
7739 	s2io_init_pci(sp);
7740 
	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
	 * are initialized with default values.
	 */
7748 	config = &sp->config;
7749 	mac_control = &sp->mac_control;
7750 
7751 	config->napi = napi;
7752 	config->tx_steering_type = tx_steering_type;
7753 
7754 	/* Tx side parameters. */
7755 	if (config->tx_steering_type == TX_PRIORITY_STEERING)
7756 		config->tx_fifo_num = MAX_TX_FIFOS;
7757 	else
7758 		config->tx_fifo_num = tx_fifo_num;
7759 
7760 	/* Initialize the fifos used for tx steering */
7761 	if (config->tx_fifo_num < 5) {
7762 		if (config->tx_fifo_num  == 1)
7763 			sp->total_tcp_fifos = 1;
7764 		else
7765 			sp->total_tcp_fifos = config->tx_fifo_num - 1;
7766 		sp->udp_fifo_idx = config->tx_fifo_num - 1;
7767 		sp->total_udp_fifos = 1;
7768 		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7769 	} else {
7770 		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7771 				       FIFO_OTHER_MAX_NUM);
7772 		sp->udp_fifo_idx = sp->total_tcp_fifos;
7773 		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7774 		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7775 	}
7776 
7777 	config->multiq = dev_multiq;
7778 	for (i = 0; i < config->tx_fifo_num; i++) {
7779 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7780 
7781 		tx_cfg->fifo_len = tx_fifo_len[i];
7782 		tx_cfg->fifo_priority = i;
7783 	}
7784 
7785 	/* mapping the QoS priority to the configured fifos */
7786 	for (i = 0; i < MAX_TX_FIFOS; i++)
7787 		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7788 
7789 	/* map the hashing selector table to the configured fifos */
7790 	for (i = 0; i < config->tx_fifo_num; i++)
7791 		sp->fifo_selector[i] = fifo_selector[i];
7792 
	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7795 	for (i = 0; i < config->tx_fifo_num; i++) {
7796 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7797 
7798 		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7799 		if (tx_cfg->fifo_len < 65) {
7800 			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7801 			break;
7802 		}
7803 	}
7804 	/* + 2 because one Txd for skb->data and one Txd for UFO */
7805 	config->max_txds = MAX_SKB_FRAGS + 2;
7806 
7807 	/* Rx side parameters. */
7808 	config->rx_ring_num = rx_ring_num;
7809 	for (i = 0; i < config->rx_ring_num; i++) {
7810 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7811 		struct ring_info *ring = &mac_control->rings[i];
7812 
7813 		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7814 		rx_cfg->ring_priority = i;
7815 		ring->rx_bufs_left = 0;
7816 		ring->rxd_mode = sp->rxd_mode;
7817 		ring->rxd_count = rxd_count[sp->rxd_mode];
7818 		ring->pdev = sp->pdev;
7819 		ring->dev = sp->dev;
7820 	}
7821 
7822 	for (i = 0; i < rx_ring_num; i++) {
7823 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7824 
7825 		rx_cfg->ring_org = RING_ORG_BUFF1;
7826 		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7827 	}
7828 
7829 	/*  Setting Mac Control parameters */
7830 	mac_control->rmac_pause_time = rmac_pause_time;
7831 	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7832 	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7833 
	/* Initialize the shared memory used by the NIC and the host */
7836 	if (init_shared_mem(sp)) {
7837 		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7838 		ret = -ENOMEM;
7839 		goto mem_alloc_failed;
7840 	}
7841 
7842 	sp->bar0 = pci_ioremap_bar(pdev, 0);
7843 	if (!sp->bar0) {
7844 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7845 			  dev->name);
7846 		ret = -ENOMEM;
7847 		goto bar0_remap_failed;
7848 	}
7849 
7850 	sp->bar1 = pci_ioremap_bar(pdev, 2);
7851 	if (!sp->bar1) {
7852 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7853 			  dev->name);
7854 		ret = -ENOMEM;
7855 		goto bar1_remap_failed;
7856 	}
7857 
7858 	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++)
		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7862 
7863 	/*  Driver entry points */
7864 	dev->netdev_ops = &s2io_netdev_ops;
7865 	dev->ethtool_ops = &netdev_ethtool_ops;
7866 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7867 		NETIF_F_TSO | NETIF_F_TSO6 |
7868 		NETIF_F_RXCSUM | NETIF_F_LRO;
7869 	dev->features |= dev->hw_features |
7870 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (sp->high_dma_flag)
7872 		dev->features |= NETIF_F_HIGHDMA;
7873 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7874 	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7875 	INIT_WORK(&sp->set_link_task, s2io_set_link);
7876 
7877 	pci_save_state(sp->pdev);
7878 
7879 	/* Setting swapper control on the NIC, for proper reset operation */
7880 	if (s2io_set_swapper(sp)) {
7881 		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7882 			  dev->name);
7883 		ret = -EAGAIN;
7884 		goto set_swap_failed;
7885 	}
7886 
	/* Verify if the Herc works in the slot it's placed into */
7888 	if (sp->device_type & XFRAME_II_DEVICE) {
7889 		mode = s2io_verify_pci_mode(sp);
7890 		if (mode < 0) {
7891 			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7892 				  __func__);
7893 			ret = -EBADSLT;
7894 			goto set_swap_failed;
7895 		}
7896 	}
7897 
7898 	if (sp->config.intr_type == MSI_X) {
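		/* One MSI-X vector per Rx ring plus one alarm vector. */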
7899 		sp->num_entries = config->rx_ring_num + 1;
7900 		ret = s2io_enable_msi_x(sp);
7901 
7902 		if (!ret) {
7903 			ret = s2io_test_msi(sp);
7904 			/* rollback MSI-X, will re-enable during add_isr() */
7905 			remove_msix_isr(sp);
7906 		}
		if (ret) {
7909 			DBG_PRINT(ERR_DBG,
7910 				  "MSI-X requested but failed to enable\n");
7911 			sp->config.intr_type = INTA;
7912 		}
7913 	}
7914 
7915 	if (config->intr_type ==  MSI_X) {
7916 		for (i = 0; i < config->rx_ring_num ; i++) {
7917 			struct ring_info *ring = &mac_control->rings[i];
7918 
7919 			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7920 		}
7921 	} else {
7922 		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7923 	}
7924 
7925 	/* Not needed for Herc */
7926 	if (sp->device_type & XFRAME_I_DEVICE) {
7927 		/*
7928 		 * Fix for all "FFs" MAC address problems observed on
7929 		 * Alpha platforms
7930 		 */
7931 		fix_mac_address(sp);
7932 		s2io_reset(sp);
7933 	}
7934 
7935 	/*
7936 	 * MAC address initialization.
7937 	 * For now only one mac address will be read and used.
7938 	 */
7939 	bar0 = sp->bar0;
7940 	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7941 		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7942 	writeq(val64, &bar0->rmac_addr_cmd_mem);
7943 	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7944 			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
7945 			      S2IO_BIT_RESET);
7946 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
7947 	mac_down = (u32)tmp64;
7948 	mac_up = (u32) (tmp64 >> 32);
7949 
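	/*
	 * The six MAC address octets occupy the upper bytes of the
	 * 64-bit data register; unpack them into wire order.
	 */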
7950 	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7951 	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7952 	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7953 	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7954 	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7955 	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7956 
	/* Set the factory defined MAC address initially */
7958 	dev->addr_len = ETH_ALEN;
7959 	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7960 
7961 	/* initialize number of multicast & unicast MAC entries variables */
7962 	if (sp->device_type == XFRAME_I_DEVICE) {
7963 		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7964 		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7965 		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7966 	} else if (sp->device_type == XFRAME_II_DEVICE) {
7967 		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7968 		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7969 		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7970 	}
7971 
7972 	/* MTU range: 46 - 9600 */
7973 	dev->min_mtu = MIN_MTU;
7974 	dev->max_mtu = S2IO_JUMBO_SIZE;
7975 
7976 	/* store mac addresses from CAM to s2io_nic structure */
7977 	do_s2io_store_unicast_mc(sp);
7978 
7979 	/* Configure MSIX vector for number of rings configured plus one */
7980 	if ((sp->device_type == XFRAME_II_DEVICE) &&
7981 	    (config->intr_type == MSI_X))
7982 		sp->num_entries = config->rx_ring_num + 1;
7983 
7984 	/* Store the values of the MSIX table in the s2io_nic structure */
7985 	store_xmsi_data(sp);
7986 	/* reset Nic and bring it to known state */
7987 	s2io_reset(sp);
7988 
7989 	/*
7990 	 * Initialize link state flags
7991 	 * and the card state parameter
7992 	 */
7993 	sp->state = 0;
7994 
7995 	/* Initialize spinlocks */
7996 	for (i = 0; i < sp->config.tx_fifo_num; i++) {
7997 		struct fifo_info *fifo = &mac_control->fifos[i];
7998 
7999 		spin_lock_init(&fifo->tx_lock);
8000 	}
8001 
8002 	/*
8003 	 * SXE-002: Configure link and activity LED to init state
8004 	 * on driver load.
8005 	 */
8006 	subid = sp->pdev->subsystem_device;
8007 	if ((subid & 0xFF) >= 0x07) {
8008 		val64 = readq(&bar0->gpio_control);
8009 		val64 |= 0x0000800000000000ULL;
8010 		writeq(val64, &bar0->gpio_control);
8011 		val64 = 0x0411040400000000ULL;
8012 		writeq(val64, (void __iomem *)bar0 + 0x2700);
8013 		val64 = readq(&bar0->gpio_control);
8014 	}
8015 
8016 	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
8017 
8018 	if (register_netdev(dev)) {
8019 		DBG_PRINT(ERR_DBG, "Device registration failed\n");
8020 		ret = -ENODEV;
8021 		goto register_failed;
8022 	}
8023 	s2io_vpd_read(sp);
8024 	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8025 	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8026 		  sp->product_name, pdev->revision);
8027 	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8028 		  s2io_driver_version);
8029 	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8030 	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8031 	if (sp->device_type & XFRAME_II_DEVICE) {
8032 		mode = s2io_print_pci_mode(sp);
8033 		if (mode < 0) {
8034 			ret = -EBADSLT;
8035 			unregister_netdev(dev);
8036 			goto set_swap_failed;
8037 		}
8038 	}
8039 	switch (sp->rxd_mode) {
8040 	case RXD_MODE_1:
8041 		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8042 			  dev->name);
8043 		break;
8044 	case RXD_MODE_3B:
8045 		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8046 			  dev->name);
8047 		break;
8048 	}
8049 
8050 	switch (sp->config.napi) {
8051 	case 0:
8052 		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8053 		break;
8054 	case 1:
8055 		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8056 		break;
8057 	}
8058 
8059 	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8060 		  sp->config.tx_fifo_num);
8061 
8062 	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8063 		  sp->config.rx_ring_num);
8064 
8065 	switch (sp->config.intr_type) {
8066 	case INTA:
8067 		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8068 		break;
8069 	case MSI_X:
8070 		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8071 		break;
8072 	}
8073 	if (sp->config.multiq) {
8074 		for (i = 0; i < sp->config.tx_fifo_num; i++) {
8075 			struct fifo_info *fifo = &mac_control->fifos[i];
8076 
8077 			fifo->multiq = config->multiq;
8078 		}
8079 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8080 			  dev->name);
8081 	} else
8082 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8083 			  dev->name);
8084 
8085 	switch (sp->config.tx_steering_type) {
8086 	case NO_STEERING:
8087 		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8088 			  dev->name);
8089 		break;
8090 	case TX_PRIORITY_STEERING:
8091 		DBG_PRINT(ERR_DBG,
8092 			  "%s: Priority steering enabled for transmit\n",
8093 			  dev->name);
8094 		break;
8095 	case TX_DEFAULT_STEERING:
8096 		DBG_PRINT(ERR_DBG,
8097 			  "%s: Default steering enabled for transmit\n",
8098 			  dev->name);
8099 	}
8100 
8101 	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8102 		  dev->name);
8103 	/* Initialize device name */
8104 	snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8105 		 sp->product_name);
8106 
8107 	if (vlan_tag_strip)
8108 		sp->vlan_strip_flag = 1;
8109 	else
8110 		sp->vlan_strip_flag = 0;
8111 
8112 	/*
8113 	 * Make Link state as off at this point, when the Link change
8114 	 * interrupt comes the state will be automatically changed to
8115 	 * the right state.
8116 	 */
8117 	netif_carrier_off(dev);
8118 
8119 	return 0;
8120 
8121 register_failed:
8122 set_swap_failed:
8123 	iounmap(sp->bar1);
8124 bar1_remap_failed:
8125 	iounmap(sp->bar0);
8126 bar0_remap_failed:
8127 mem_alloc_failed:
8128 	free_shared_mem(sp);
8129 	pci_disable_device(pdev);
8130 	pci_release_regions(pdev);
8131 	free_netdev(dev);
8132 
8133 	return ret;
8134 }
8135 
/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */
8144 
8145 static void s2io_rem_nic(struct pci_dev *pdev)
8146 {
8147 	struct net_device *dev = pci_get_drvdata(pdev);
8148 	struct s2io_nic *sp;
8149 
8150 	if (dev == NULL) {
8151 		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8152 		return;
8153 	}
8154 
8155 	sp = netdev_priv(dev);
8156 
8157 	cancel_work_sync(&sp->rst_timer_task);
8158 	cancel_work_sync(&sp->set_link_task);
8159 
8160 	unregister_netdev(dev);
8161 
8162 	free_shared_mem(sp);
8163 	iounmap(sp->bar0);
8164 	iounmap(sp->bar1);
8165 	pci_release_regions(pdev);
8166 	free_netdev(dev);
8167 	pci_disable_device(pdev);
8168 }
8169 
8170 module_pci_driver(s2io_driver);
8171 
8172 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8173 				struct tcphdr **tcp, struct RxD_t *rxdp,
8174 				struct s2io_nic *sp)
8175 {
8176 	int ip_off;
8177 	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
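	/*
	 * Bits 37-39 of Control_1 carry the 3-bit L2 frame type;
	 * type 0 is plain DIX (Ethernet II) and type 4 is DIX with
	 * a VLAN tag, as checked below.
	 */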
8178 
8179 	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8180 		DBG_PRINT(INIT_DBG,
8181 			  "%s: Non-TCP frames not supported for LRO\n",
8182 			  __func__);
8183 		return -1;
8184 	}
8185 
8186 	/* Checking for DIX type or DIX type with VLAN */
8187 	if ((l2_type == 0) || (l2_type == 4)) {
8188 		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8189 		/*
8190 		 * If vlan stripping is disabled and the frame is VLAN tagged,
8191 		 * shift the offset by the VLAN header size bytes.
8192 		 */
8193 		if ((!sp->vlan_strip_flag) &&
8194 		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8195 			ip_off += HEADER_VLAN_SIZE;
8196 	} else {
8197 		/* LLC, SNAP etc are considered non-mergeable */
8198 		return -1;
8199 	}
8200 
8201 	*ip = (struct iphdr *)(buffer + ip_off);
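	/*
	 * ihl counts 32-bit words; shifting left by 2 converts it to
	 * bytes so the TCP header can be located right after the IP
	 * header.
	 */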
8202 	ip_len = (u8)((*ip)->ihl);
8203 	ip_len <<= 2;
8204 	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8205 
8206 	return 0;
8207 }
8208 
8209 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8210 				  struct tcphdr *tcp)
8211 {
8212 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8213 	if ((lro->iph->saddr != ip->saddr) ||
8214 	    (lro->iph->daddr != ip->daddr) ||
8215 	    (lro->tcph->source != tcp->source) ||
8216 	    (lro->tcph->dest != tcp->dest))
8217 		return -1;
8218 	return 0;
8219 }
8220 
8221 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8222 {
8223 	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8224 }
8225 
8226 static void initiate_new_session(struct lro *lro, u8 *l2h,
8227 				 struct iphdr *ip, struct tcphdr *tcp,
8228 				 u32 tcp_pyld_len, u16 vlan_tag)
8229 {
8230 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8231 	lro->l2h = l2h;
8232 	lro->iph = ip;
8233 	lro->tcph = tcp;
8234 	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8235 	lro->tcp_ack = tcp->ack_seq;
8236 	lro->sg_num = 1;
8237 	lro->total_len = ntohs(ip->tot_len);
8238 	lro->frags_len = 0;
8239 	lro->vlan_tag = vlan_tag;
8240 	/*
8241 	 * Check if we saw TCP timestamp.
8242 	 * Other consistency checks have already been done.
8243 	 */
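	/*
	 * A data offset of 8 means a 32-byte TCP header, i.e. 12 option
	 * bytes that verify_l3_l4_lro_capable() has already vetted. The
	 * code assumes the common NOP-NOP-timestamp layout, so the
	 * second and third 32-bit option words are tsval and tsecr.
	 */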
8244 	if (tcp->doff == 8) {
8245 		__be32 *ptr;
8246 		ptr = (__be32 *)(tcp+1);
8247 		lro->saw_ts = 1;
8248 		lro->cur_tsval = ntohl(*(ptr+1));
8249 		lro->cur_tsecr = *(ptr+2);
8250 	}
8251 	lro->in_use = 1;
8252 }
8253 
8254 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8255 {
8256 	struct iphdr *ip = lro->iph;
8257 	struct tcphdr *tcp = lro->tcph;
8258 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8259 
8260 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8261 
8262 	/* Update L3 header */
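	/*
	 * csum_replace2() patches the checksum incrementally for the
	 * new total length rather than recomputing it over the whole
	 * header.
	 */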
8263 	csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8264 	ip->tot_len = htons(lro->total_len);
8265 
8266 	/* Update L4 header */
8267 	tcp->ack_seq = lro->tcp_ack;
8268 	tcp->window = lro->window;
8269 
8270 	/* Update tsecr field if this session has timestamps enabled */
8271 	if (lro->saw_ts) {
8272 		__be32 *ptr = (__be32 *)(tcp + 1);
8273 		*(ptr+2) = lro->cur_tsecr;
8274 	}
8275 
8276 	/* Update counters required for calculation of
8277 	 * average no. of packets aggregated.
8278 	 */
8279 	swstats->sum_avg_pkts_aggregated += lro->sg_num;
8280 	swstats->num_aggregations++;
8281 }
8282 
8283 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8284 			     struct tcphdr *tcp, u32 l4_pyld)
8285 {
8286 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8287 	lro->total_len += l4_pyld;
8288 	lro->frags_len += l4_pyld;
8289 	lro->tcp_next_seq += l4_pyld;
8290 	lro->sg_num++;
8291 
	/* Update ack seq no. and window size (from this pkt) in LRO object */
8293 	lro->tcp_ack = tcp->ack_seq;
8294 	lro->window = tcp->window;
8295 
8296 	if (lro->saw_ts) {
8297 		__be32 *ptr;
8298 		/* Update tsecr and tsval from this packet */
8299 		ptr = (__be32 *)(tcp+1);
8300 		lro->cur_tsval = ntohl(*(ptr+1));
8301 		lro->cur_tsecr = *(ptr + 2);
8302 	}
8303 }
8304 
8305 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8306 				    struct tcphdr *tcp, u32 tcp_pyld_len)
8307 {
8308 	u8 *ptr;
8309 
8310 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8311 
8312 	if (!tcp_pyld_len) {
8313 		/* Runt frame or a pure ack */
8314 		return -1;
8315 	}
8316 
8317 	if (ip->ihl != 5) /* IP has options */
8318 		return -1;
8319 
8320 	/* If we see CE codepoint in IP header, packet is not mergeable */
8321 	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8322 		return -1;
8323 
8324 	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8325 	if (tcp->urg || tcp->psh || tcp->rst ||
8326 	    tcp->syn || tcp->fin ||
8327 	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently only the ACK control bit is recognized;
		 * any other control field being set results in
		 * flushing the LRO session.
		 */
8333 		return -1;
8334 	}
8335 
8336 	/*
8337 	 * Allow only one TCP timestamp option. Don't aggregate if
8338 	 * any other options are detected.
8339 	 */
8340 	if (tcp->doff != 5 && tcp->doff != 8)
8341 		return -1;
8342 
8343 	if (tcp->doff == 8) {
8344 		ptr = (u8 *)(tcp + 1);
8345 		while (*ptr == TCPOPT_NOP)
8346 			ptr++;
8347 		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8348 			return -1;
8349 
8350 		/* Ensure timestamp value increases monotonically */
8351 		if (l_lro)
8352 			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8353 				return -1;
8354 
8355 		/* timestamp echo reply should be non-zero */
8356 		if (*((__be32 *)(ptr+6)) == 0)
8357 			return -1;
8358 	}
8359 
8360 	return 0;
8361 }
8362 
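/*
 * s2io_club_tcp_session - try to aggregate an incoming TCP segment.
 * Return codes, as consumed by rx_osm_handler():
 *  -1 - frame is not TCP or not L2 aggregatable
 *   0 - all LRO sessions are already in use
 *   1 - segment appended to an existing session
 *   2 - unmergeable or out-of-sequence segment; flush session and send both
 *   3 - a new session was initiated
 *   4 - session reached lro_max_aggr_per_sess and was flushed
 *   5 - first packet of a would-be session is not L3/L4 aggregatable
 */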
8363 static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
8364 				 u8 **tcp, u32 *tcp_len, struct lro **lro,
8365 				 struct RxD_t *rxdp, struct s2io_nic *sp)
8366 {
8367 	struct iphdr *ip;
8368 	struct tcphdr *tcph;
8369 	int ret = 0, i;
8370 	u16 vlan_tag = 0;
8371 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8372 
8373 	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8374 				   rxdp, sp);
8375 	if (ret)
8376 		return ret;
8377 
8378 	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
8379 
8380 	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8381 	tcph = (struct tcphdr *)*tcp;
8382 	*tcp_len = get_l4_pyld_length(ip, tcph);
8383 	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8384 		struct lro *l_lro = &ring_data->lro0_n[i];
8385 		if (l_lro->in_use) {
8386 			if (check_for_socket_match(l_lro, ip, tcph))
8387 				continue;
8388 			/* Sock pair matched */
8389 			*lro = l_lro;
8390 
8391 			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8392 				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
8393 					  "expected 0x%x, actual 0x%x\n",
8394 					  __func__,
8395 					  (*lro)->tcp_next_seq,
8396 					  ntohl(tcph->seq));
8397 
8398 				swstats->outof_sequence_pkts++;
8399 				ret = 2;
8400 				break;
8401 			}
8402 
8403 			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8404 						      *tcp_len))
8405 				ret = 1; /* Aggregate */
8406 			else
8407 				ret = 2; /* Flush both */
8408 			break;
8409 		}
8410 	}
8411 
8412 	if (ret == 0) {
8413 		/* Before searching for available LRO objects,
8414 		 * check if the pkt is L3/L4 aggregatable. If not
8415 		 * don't create new LRO session. Just send this
8416 		 * packet up.
8417 		 */
8418 		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
8419 			return 5;
8420 
8421 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8422 			struct lro *l_lro = &ring_data->lro0_n[i];
8423 			if (!(l_lro->in_use)) {
8424 				*lro = l_lro;
8425 				ret = 3; /* Begin anew */
8426 				break;
8427 			}
8428 		}
8429 	}
8430 
8431 	if (ret == 0) { /* sessions exceeded */
8432 		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8433 			  __func__);
8434 		*lro = NULL;
8435 		return ret;
8436 	}
8437 
8438 	switch (ret) {
8439 	case 3:
8440 		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8441 				     vlan_tag);
8442 		break;
8443 	case 2:
8444 		update_L3L4_header(sp, *lro);
8445 		break;
8446 	case 1:
8447 		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8448 		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8449 			update_L3L4_header(sp, *lro);
8450 			ret = 4; /* Flush the LRO */
8451 		}
8452 		break;
8453 	default:
8454 		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
8455 		break;
8456 	}
8457 
8458 	return ret;
8459 }
8460 
static void clear_lro_session(struct lro *lro)
{
	memset(lro, 0, sizeof(struct lro));
}
8467 
8468 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8469 {
8470 	struct net_device *dev = skb->dev;
8471 	struct s2io_nic *sp = netdev_priv(dev);
8472 
8473 	skb->protocol = eth_type_trans(skb, dev);
8474 	if (vlan_tag && sp->vlan_strip_flag)
8475 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8476 	if (sp->config.napi)
8477 		netif_receive_skb(skb);
8478 	else
8479 		netif_rx(skb);
8480 }
8481 
8482 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8483 			   struct sk_buff *skb, u32 tcp_len)
8484 {
8485 	struct sk_buff *first = lro->parent;
8486 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8487 
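	/*
	 * Aggregated segments are chained onto the first skb's
	 * frag_list; skb_pull() strips this segment's L2/L3/L4
	 * headers so that only its TCP payload is appended.
	 */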
8488 	first->len += tcp_len;
8489 	first->data_len = lro->frags_len;
8490 	skb_pull(skb, (skb->len - tcp_len));
8491 	if (skb_shinfo(first)->frag_list)
8492 		lro->last_frag->next = skb;
8493 	else
8494 		skb_shinfo(first)->frag_list = skb;
8495 	first->truesize += skb->truesize;
8496 	lro->last_frag = skb;
8497 	swstats->clubbed_frms_cnt++;
8498 }
8499 
8500 /**
8501  * s2io_io_error_detected - called when PCI error is detected
8502  * @pdev: Pointer to PCI device
8503  * @state: The current pci connection state
8504  *
8505  * This function is called after a PCI bus error affecting
8506  * this device has been detected.
8507  */
8508 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8509 					       pci_channel_state_t state)
8510 {
8511 	struct net_device *netdev = pci_get_drvdata(pdev);
8512 	struct s2io_nic *sp = netdev_priv(netdev);
8513 
8514 	netif_device_detach(netdev);
8515 
8516 	if (state == pci_channel_io_perm_failure)
8517 		return PCI_ERS_RESULT_DISCONNECT;
8518 
8519 	if (netif_running(netdev)) {
8520 		/* Bring down the card, while avoiding PCI I/O */
8521 		do_s2io_card_down(sp, 0);
8522 	}
8523 	pci_disable_device(pdev);
8524 
8525 	return PCI_ERS_RESULT_NEED_RESET;
8526 }
8527 
/**
 * s2io_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
8537 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8538 {
8539 	struct net_device *netdev = pci_get_drvdata(pdev);
8540 	struct s2io_nic *sp = netdev_priv(netdev);
8541 
8542 	if (pci_enable_device(pdev)) {
8543 		pr_err("Cannot re-enable PCI device after reset.\n");
8544 		return PCI_ERS_RESULT_DISCONNECT;
8545 	}
8546 
8547 	pci_set_master(pdev);
8548 	s2io_reset(sp);
8549 
8550 	return PCI_ERS_RESULT_RECOVERED;
8551 }
8552 
/**
 * s2io_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
8560 static void s2io_io_resume(struct pci_dev *pdev)
8561 {
8562 	struct net_device *netdev = pci_get_drvdata(pdev);
8563 	struct s2io_nic *sp = netdev_priv(netdev);
8564 
8565 	if (netif_running(netdev)) {
8566 		if (s2io_card_up(sp)) {
8567 			pr_err("Can't bring device back up after reset.\n");
8568 			return;
8569 		}
8570 
8571 		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8572 			s2io_card_down(sp);
8573 			pr_err("Can't restore mac addr after reset.\n");
8574 			return;
8575 		}
8576 	}
8577 
8578 	netif_device_attach(netdev);
8579 	netif_tx_wake_all_queues(netdev);
8580 }
8581