1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2010 Exar Corp.
4  *
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik		: For pointing out the improper error condition
15  *			  check in the s2io_xmit routine and also some
16  *			  issues in the Tx watch dog function. Also for
17  *			  patiently answering all those innumerable
18  *			  questions regaring the 2.6 porting issues.
19  * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
20  *			  macros available only in 2.6 Kernel.
21  * Francois Romieu	: For pointing out all code part that were
22  *			  deprecated and also styling related comments.
23  * Grant Grundler	: For helping me get rid of some Architecture
24  *			  dependent code.
25  * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *		values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_max_pkts: This parameter defines maximum number of packets can be
42  *     aggregated as a single large packet
43  * napi: This parameter used to enable/disable NAPI (polling Rx)
44  *     Possible values '1' for enable and '0' for disable. Default is '1'
45  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
46  *      Possible values '1' for enable and '0' for disable. Default is '0'
47  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
48  *                 Possible values '1' for enable , '0' for disable.
49  *                 Default is '2' - which means disable in promisc mode
50  *                 and enable in non-promiscuous mode.
51  * multiq: This parameter used to enable/disable MULTIQUEUE support.
52  *      Possible values '1' for enable and '0' for disable. Default is '0'
53  ************************************************************************/
54 
55 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
56 
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/mdio.h>
67 #include <linux/skbuff.h>
68 #include <linux/init.h>
69 #include <linux/delay.h>
70 #include <linux/stddef.h>
71 #include <linux/ioctl.h>
72 #include <linux/timex.h>
73 #include <linux/ethtool.h>
74 #include <linux/workqueue.h>
75 #include <linux/if_vlan.h>
76 #include <linux/ip.h>
77 #include <linux/tcp.h>
78 #include <linux/uaccess.h>
79 #include <linux/io.h>
80 #include <linux/slab.h>
81 #include <linux/prefetch.h>
82 #include <net/tcp.h>
83 #include <net/checksum.h>
84 
85 #include <asm/div64.h>
86 #include <asm/irq.h>
87 
88 /* local include */
89 #include "s2io.h"
90 #include "s2io-regs.h"
91 
92 #define DRV_VERSION "2.0.26.28"
93 
94 /* S2io Driver name & version. */
95 static const char s2io_driver_name[] = "Neterion";
96 static const char s2io_driver_version[] = DRV_VERSION;
97 
98 static const int rxd_size[2] = {32, 48};
99 static const int rxd_count[2] = {127, 85};
100 
101 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
102 {
103 	int ret;
104 
105 	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
106 	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
107 
108 	return ret;
109 }
110 
111 /*
112  * Cards with following subsystem_id have a link state indication
113  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
114  * macro below identifies these cards given the subsystem_id.
115  */
116 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
117 	(dev_type == XFRAME_I_DEVICE) ?					\
118 	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
119 	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
120 
121 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
122 				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
123 
124 static inline int is_s2io_card_up(const struct s2io_nic *sp)
125 {
126 	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
127 }
128 
129 /* Ethtool related variables and Macros. */
130 static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
131 	"Register test\t(offline)",
132 	"Eeprom test\t(offline)",
133 	"Link test\t(online)",
134 	"RLDRAM test\t(offline)",
135 	"BIST Test\t(offline)"
136 };
137 
138 static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
139 	{"tmac_frms"},
140 	{"tmac_data_octets"},
141 	{"tmac_drop_frms"},
142 	{"tmac_mcst_frms"},
143 	{"tmac_bcst_frms"},
144 	{"tmac_pause_ctrl_frms"},
145 	{"tmac_ttl_octets"},
146 	{"tmac_ucst_frms"},
147 	{"tmac_nucst_frms"},
148 	{"tmac_any_err_frms"},
149 	{"tmac_ttl_less_fb_octets"},
150 	{"tmac_vld_ip_octets"},
151 	{"tmac_vld_ip"},
152 	{"tmac_drop_ip"},
153 	{"tmac_icmp"},
154 	{"tmac_rst_tcp"},
155 	{"tmac_tcp"},
156 	{"tmac_udp"},
157 	{"rmac_vld_frms"},
158 	{"rmac_data_octets"},
159 	{"rmac_fcs_err_frms"},
160 	{"rmac_drop_frms"},
161 	{"rmac_vld_mcst_frms"},
162 	{"rmac_vld_bcst_frms"},
163 	{"rmac_in_rng_len_err_frms"},
164 	{"rmac_out_rng_len_err_frms"},
165 	{"rmac_long_frms"},
166 	{"rmac_pause_ctrl_frms"},
167 	{"rmac_unsup_ctrl_frms"},
168 	{"rmac_ttl_octets"},
169 	{"rmac_accepted_ucst_frms"},
170 	{"rmac_accepted_nucst_frms"},
171 	{"rmac_discarded_frms"},
172 	{"rmac_drop_events"},
173 	{"rmac_ttl_less_fb_octets"},
174 	{"rmac_ttl_frms"},
175 	{"rmac_usized_frms"},
176 	{"rmac_osized_frms"},
177 	{"rmac_frag_frms"},
178 	{"rmac_jabber_frms"},
179 	{"rmac_ttl_64_frms"},
180 	{"rmac_ttl_65_127_frms"},
181 	{"rmac_ttl_128_255_frms"},
182 	{"rmac_ttl_256_511_frms"},
183 	{"rmac_ttl_512_1023_frms"},
184 	{"rmac_ttl_1024_1518_frms"},
185 	{"rmac_ip"},
186 	{"rmac_ip_octets"},
187 	{"rmac_hdr_err_ip"},
188 	{"rmac_drop_ip"},
189 	{"rmac_icmp"},
190 	{"rmac_tcp"},
191 	{"rmac_udp"},
192 	{"rmac_err_drp_udp"},
193 	{"rmac_xgmii_err_sym"},
194 	{"rmac_frms_q0"},
195 	{"rmac_frms_q1"},
196 	{"rmac_frms_q2"},
197 	{"rmac_frms_q3"},
198 	{"rmac_frms_q4"},
199 	{"rmac_frms_q5"},
200 	{"rmac_frms_q6"},
201 	{"rmac_frms_q7"},
202 	{"rmac_full_q0"},
203 	{"rmac_full_q1"},
204 	{"rmac_full_q2"},
205 	{"rmac_full_q3"},
206 	{"rmac_full_q4"},
207 	{"rmac_full_q5"},
208 	{"rmac_full_q6"},
209 	{"rmac_full_q7"},
210 	{"rmac_pause_cnt"},
211 	{"rmac_xgmii_data_err_cnt"},
212 	{"rmac_xgmii_ctrl_err_cnt"},
213 	{"rmac_accepted_ip"},
214 	{"rmac_err_tcp"},
215 	{"rd_req_cnt"},
216 	{"new_rd_req_cnt"},
217 	{"new_rd_req_rtry_cnt"},
218 	{"rd_rtry_cnt"},
219 	{"wr_rtry_rd_ack_cnt"},
220 	{"wr_req_cnt"},
221 	{"new_wr_req_cnt"},
222 	{"new_wr_req_rtry_cnt"},
223 	{"wr_rtry_cnt"},
224 	{"wr_disc_cnt"},
225 	{"rd_rtry_wr_ack_cnt"},
226 	{"txp_wr_cnt"},
227 	{"txd_rd_cnt"},
228 	{"txd_wr_cnt"},
229 	{"rxd_rd_cnt"},
230 	{"rxd_wr_cnt"},
231 	{"txf_rd_cnt"},
232 	{"rxf_wr_cnt"}
233 };
234 
235 static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
236 	{"rmac_ttl_1519_4095_frms"},
237 	{"rmac_ttl_4096_8191_frms"},
238 	{"rmac_ttl_8192_max_frms"},
239 	{"rmac_ttl_gt_max_frms"},
240 	{"rmac_osized_alt_frms"},
241 	{"rmac_jabber_alt_frms"},
242 	{"rmac_gt_max_alt_frms"},
243 	{"rmac_vlan_frms"},
244 	{"rmac_len_discard"},
245 	{"rmac_fcs_discard"},
246 	{"rmac_pf_discard"},
247 	{"rmac_da_discard"},
248 	{"rmac_red_discard"},
249 	{"rmac_rts_discard"},
250 	{"rmac_ingm_full_discard"},
251 	{"link_fault_cnt"}
252 };
253 
254 static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
255 	{"\n DRIVER STATISTICS"},
256 	{"single_bit_ecc_errs"},
257 	{"double_bit_ecc_errs"},
258 	{"parity_err_cnt"},
259 	{"serious_err_cnt"},
260 	{"soft_reset_cnt"},
261 	{"fifo_full_cnt"},
262 	{"ring_0_full_cnt"},
263 	{"ring_1_full_cnt"},
264 	{"ring_2_full_cnt"},
265 	{"ring_3_full_cnt"},
266 	{"ring_4_full_cnt"},
267 	{"ring_5_full_cnt"},
268 	{"ring_6_full_cnt"},
269 	{"ring_7_full_cnt"},
270 	{"alarm_transceiver_temp_high"},
271 	{"alarm_transceiver_temp_low"},
272 	{"alarm_laser_bias_current_high"},
273 	{"alarm_laser_bias_current_low"},
274 	{"alarm_laser_output_power_high"},
275 	{"alarm_laser_output_power_low"},
276 	{"warn_transceiver_temp_high"},
277 	{"warn_transceiver_temp_low"},
278 	{"warn_laser_bias_current_high"},
279 	{"warn_laser_bias_current_low"},
280 	{"warn_laser_output_power_high"},
281 	{"warn_laser_output_power_low"},
282 	{"lro_aggregated_pkts"},
283 	{"lro_flush_both_count"},
284 	{"lro_out_of_sequence_pkts"},
285 	{"lro_flush_due_to_max_pkts"},
286 	{"lro_avg_aggr_pkts"},
287 	{"mem_alloc_fail_cnt"},
288 	{"pci_map_fail_cnt"},
289 	{"watchdog_timer_cnt"},
290 	{"mem_allocated"},
291 	{"mem_freed"},
292 	{"link_up_cnt"},
293 	{"link_down_cnt"},
294 	{"link_up_time"},
295 	{"link_down_time"},
296 	{"tx_tcode_buf_abort_cnt"},
297 	{"tx_tcode_desc_abort_cnt"},
298 	{"tx_tcode_parity_err_cnt"},
299 	{"tx_tcode_link_loss_cnt"},
300 	{"tx_tcode_list_proc_err_cnt"},
301 	{"rx_tcode_parity_err_cnt"},
302 	{"rx_tcode_abort_cnt"},
303 	{"rx_tcode_parity_abort_cnt"},
304 	{"rx_tcode_rda_fail_cnt"},
305 	{"rx_tcode_unkn_prot_cnt"},
306 	{"rx_tcode_fcs_err_cnt"},
307 	{"rx_tcode_buf_size_err_cnt"},
308 	{"rx_tcode_rxd_corrupt_cnt"},
309 	{"rx_tcode_unkn_err_cnt"},
310 	{"tda_err_cnt"},
311 	{"pfc_err_cnt"},
312 	{"pcc_err_cnt"},
313 	{"tti_err_cnt"},
314 	{"tpa_err_cnt"},
315 	{"sm_err_cnt"},
316 	{"lso_err_cnt"},
317 	{"mac_tmac_err_cnt"},
318 	{"mac_rmac_err_cnt"},
319 	{"xgxs_txgxs_err_cnt"},
320 	{"xgxs_rxgxs_err_cnt"},
321 	{"rc_err_cnt"},
322 	{"prc_pcix_err_cnt"},
323 	{"rpa_err_cnt"},
324 	{"rda_err_cnt"},
325 	{"rti_err_cnt"},
326 	{"mc_err_cnt"}
327 };
328 
329 #define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
330 #define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
331 #define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)
332 
333 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
334 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
335 
336 #define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
337 #define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
338 
339 #define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
340 #define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
341 
342 #define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
343 	init_timer(&timer);				\
344 	timer.function = handle;			\
345 	timer.data = (unsigned long)arg;		\
346 	mod_timer(&timer, (jiffies + exp))		\
347 
348 /* copy mac addr to def_mac_addr array */
349 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
350 {
351 	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
352 	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
353 	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
354 	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
355 	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
356 	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
357 }
358 
359 /*
360  * Constants to be programmed into the Xena's registers, to configure
361  * the XAUI.
362  */
363 
364 #define	END_SIGN	0x0
365 static const u64 herc_act_dtx_cfg[] = {
366 	/* Set address */
367 	0x8000051536750000ULL, 0x80000515367500E0ULL,
368 	/* Write data */
369 	0x8000051536750004ULL, 0x80000515367500E4ULL,
370 	/* Set address */
371 	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
372 	/* Write data */
373 	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
374 	/* Set address */
375 	0x801205150D440000ULL, 0x801205150D4400E0ULL,
376 	/* Write data */
377 	0x801205150D440004ULL, 0x801205150D4400E4ULL,
378 	/* Set address */
379 	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
380 	/* Write data */
381 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
382 	/* Done */
383 	END_SIGN
384 };
385 
386 static const u64 xena_dtx_cfg[] = {
387 	/* Set address */
388 	0x8000051500000000ULL, 0x80000515000000E0ULL,
389 	/* Write data */
390 	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
391 	/* Set address */
392 	0x8001051500000000ULL, 0x80010515000000E0ULL,
393 	/* Write data */
394 	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
395 	/* Set address */
396 	0x8002051500000000ULL, 0x80020515000000E0ULL,
397 	/* Write data */
398 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
399 	END_SIGN
400 };
401 
402 /*
403  * Constants for Fixing the MacAddress problem seen mostly on
404  * Alpha machines.
405  */
406 static const u64 fix_mac[] = {
407 	0x0060000000000000ULL, 0x0060600000000000ULL,
408 	0x0040600000000000ULL, 0x0000600000000000ULL,
409 	0x0020600000000000ULL, 0x0060600000000000ULL,
410 	0x0020600000000000ULL, 0x0060600000000000ULL,
411 	0x0020600000000000ULL, 0x0060600000000000ULL,
412 	0x0020600000000000ULL, 0x0060600000000000ULL,
413 	0x0020600000000000ULL, 0x0060600000000000ULL,
414 	0x0020600000000000ULL, 0x0060600000000000ULL,
415 	0x0020600000000000ULL, 0x0060600000000000ULL,
416 	0x0020600000000000ULL, 0x0060600000000000ULL,
417 	0x0020600000000000ULL, 0x0060600000000000ULL,
418 	0x0020600000000000ULL, 0x0060600000000000ULL,
419 	0x0020600000000000ULL, 0x0000600000000000ULL,
420 	0x0040600000000000ULL, 0x0060600000000000ULL,
421 	END_SIGN
422 };
423 
424 MODULE_LICENSE("GPL");
425 MODULE_VERSION(DRV_VERSION);
426 
427 
428 /* Module Loadable parameters. */
429 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
430 S2IO_PARM_INT(rx_ring_num, 1);
431 S2IO_PARM_INT(multiq, 0);
432 S2IO_PARM_INT(rx_ring_mode, 1);
433 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
434 S2IO_PARM_INT(rmac_pause_time, 0x100);
435 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
436 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
437 S2IO_PARM_INT(shared_splits, 0);
438 S2IO_PARM_INT(tmac_util_period, 5);
439 S2IO_PARM_INT(rmac_util_period, 5);
440 S2IO_PARM_INT(l3l4hdr_size, 128);
441 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
442 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
443 /* Frequency of Rx desc syncs expressed as power of 2 */
444 S2IO_PARM_INT(rxsync_frequency, 3);
445 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
446 S2IO_PARM_INT(intr_type, 2);
447 /* Large receive offload feature */
448 
449 /* Max pkts to be aggregated by LRO at one time. If not specified,
450  * aggregation happens until we hit max IP pkt size(64K)
451  */
452 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
453 S2IO_PARM_INT(indicate_max_pkts, 0);
454 
455 S2IO_PARM_INT(napi, 1);
456 S2IO_PARM_INT(ufo, 0);
457 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
458 
459 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
460 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
461 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
462 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
463 static unsigned int rts_frm_len[MAX_RX_RINGS] =
464 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
465 
466 module_param_array(tx_fifo_len, uint, NULL, 0);
467 module_param_array(rx_ring_sz, uint, NULL, 0);
468 module_param_array(rts_frm_len, uint, NULL, 0);
469 
470 /*
471  * S2IO device table.
472  * This table lists all the devices that this driver supports.
473  */
474 static const struct pci_device_id s2io_tbl[] = {
475 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
476 	 PCI_ANY_ID, PCI_ANY_ID},
477 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
478 	 PCI_ANY_ID, PCI_ANY_ID},
479 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
480 	 PCI_ANY_ID, PCI_ANY_ID},
481 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
482 	 PCI_ANY_ID, PCI_ANY_ID},
483 	{0,}
484 };
485 
486 MODULE_DEVICE_TABLE(pci, s2io_tbl);
487 
488 static const struct pci_error_handlers s2io_err_handler = {
489 	.error_detected = s2io_io_error_detected,
490 	.slot_reset = s2io_io_slot_reset,
491 	.resume = s2io_io_resume,
492 };
493 
494 static struct pci_driver s2io_driver = {
495 	.name = "S2IO",
496 	.id_table = s2io_tbl,
497 	.probe = s2io_init_nic,
498 	.remove = s2io_rem_nic,
499 	.err_handler = &s2io_err_handler,
500 };
501 
502 /* A simplifier macro used both by init and free shared_mem Fns(). */
503 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
504 
505 /* netqueue manipulation helper functions */
506 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
507 {
508 	if (!sp->config.multiq) {
509 		int i;
510 
511 		for (i = 0; i < sp->config.tx_fifo_num; i++)
512 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
513 	}
514 	netif_tx_stop_all_queues(sp->dev);
515 }
516 
517 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
518 {
519 	if (!sp->config.multiq)
520 		sp->mac_control.fifos[fifo_no].queue_state =
521 			FIFO_QUEUE_STOP;
522 
523 	netif_tx_stop_all_queues(sp->dev);
524 }
525 
526 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
527 {
528 	if (!sp->config.multiq) {
529 		int i;
530 
531 		for (i = 0; i < sp->config.tx_fifo_num; i++)
532 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
533 	}
534 	netif_tx_start_all_queues(sp->dev);
535 }
536 
537 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
538 {
539 	if (!sp->config.multiq) {
540 		int i;
541 
542 		for (i = 0; i < sp->config.tx_fifo_num; i++)
543 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
544 	}
545 	netif_tx_wake_all_queues(sp->dev);
546 }
547 
548 static inline void s2io_wake_tx_queue(
549 	struct fifo_info *fifo, int cnt, u8 multiq)
550 {
551 
552 	if (multiq) {
553 		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
554 			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
555 	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
556 		if (netif_queue_stopped(fifo->dev)) {
557 			fifo->queue_state = FIFO_QUEUE_START;
558 			netif_wake_queue(fifo->dev);
559 		}
560 	}
561 }
562 
563 /**
564  * init_shared_mem - Allocation and Initialization of Memory
565  * @nic: Device private variable.
566  * Description: The function allocates all the memory areas shared
567  * between the NIC and the driver. This includes Tx descriptors,
568  * Rx descriptors and the statistics block.
569  */
570 
571 static int init_shared_mem(struct s2io_nic *nic)
572 {
573 	u32 size;
574 	void *tmp_v_addr, *tmp_v_addr_next;
575 	dma_addr_t tmp_p_addr, tmp_p_addr_next;
576 	struct RxD_block *pre_rxd_blk = NULL;
577 	int i, j, blk_cnt;
578 	int lst_size, lst_per_page;
579 	struct net_device *dev = nic->dev;
580 	unsigned long tmp;
581 	struct buffAdd *ba;
582 	struct config_param *config = &nic->config;
583 	struct mac_info *mac_control = &nic->mac_control;
584 	unsigned long long mem_allocated = 0;
585 
586 	/* Allocation and initialization of TXDLs in FIFOs */
587 	size = 0;
588 	for (i = 0; i < config->tx_fifo_num; i++) {
589 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
590 
591 		size += tx_cfg->fifo_len;
592 	}
593 	if (size > MAX_AVAILABLE_TXDS) {
594 		DBG_PRINT(ERR_DBG,
595 			  "Too many TxDs requested: %d, max supported: %d\n",
596 			  size, MAX_AVAILABLE_TXDS);
597 		return -EINVAL;
598 	}
599 
600 	size = 0;
601 	for (i = 0; i < config->tx_fifo_num; i++) {
602 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
603 
604 		size = tx_cfg->fifo_len;
605 		/*
606 		 * Legal values are from 2 to 8192
607 		 */
608 		if (size < 2) {
609 			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
610 				  "Valid lengths are 2 through 8192\n",
611 				  i, size);
612 			return -EINVAL;
613 		}
614 	}
615 
616 	lst_size = (sizeof(struct TxD) * config->max_txds);
617 	lst_per_page = PAGE_SIZE / lst_size;
618 
619 	for (i = 0; i < config->tx_fifo_num; i++) {
620 		struct fifo_info *fifo = &mac_control->fifos[i];
621 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
622 		int fifo_len = tx_cfg->fifo_len;
623 		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
624 
625 		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
626 		if (!fifo->list_info) {
627 			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
628 			return -ENOMEM;
629 		}
630 		mem_allocated += list_holder_size;
631 	}
632 	for (i = 0; i < config->tx_fifo_num; i++) {
633 		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
634 						lst_per_page);
635 		struct fifo_info *fifo = &mac_control->fifos[i];
636 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
637 
638 		fifo->tx_curr_put_info.offset = 0;
639 		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
640 		fifo->tx_curr_get_info.offset = 0;
641 		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
642 		fifo->fifo_no = i;
643 		fifo->nic = nic;
644 		fifo->max_txds = MAX_SKB_FRAGS + 2;
645 		fifo->dev = dev;
646 
647 		for (j = 0; j < page_num; j++) {
648 			int k = 0;
649 			dma_addr_t tmp_p;
650 			void *tmp_v;
651 			tmp_v = pci_alloc_consistent(nic->pdev,
652 						     PAGE_SIZE, &tmp_p);
653 			if (!tmp_v) {
654 				DBG_PRINT(INFO_DBG,
655 					  "pci_alloc_consistent failed for TxDL\n");
656 				return -ENOMEM;
657 			}
658 			/* If we got a zero DMA address(can happen on
659 			 * certain platforms like PPC), reallocate.
660 			 * Store virtual address of page we don't want,
661 			 * to be freed later.
662 			 */
663 			if (!tmp_p) {
664 				mac_control->zerodma_virt_addr = tmp_v;
665 				DBG_PRINT(INIT_DBG,
666 					  "%s: Zero DMA address for TxDL. "
667 					  "Virtual address %p\n",
668 					  dev->name, tmp_v);
669 				tmp_v = pci_alloc_consistent(nic->pdev,
670 							     PAGE_SIZE, &tmp_p);
671 				if (!tmp_v) {
672 					DBG_PRINT(INFO_DBG,
673 						  "pci_alloc_consistent failed for TxDL\n");
674 					return -ENOMEM;
675 				}
676 				mem_allocated += PAGE_SIZE;
677 			}
678 			while (k < lst_per_page) {
679 				int l = (j * lst_per_page) + k;
680 				if (l == tx_cfg->fifo_len)
681 					break;
682 				fifo->list_info[l].list_virt_addr =
683 					tmp_v + (k * lst_size);
684 				fifo->list_info[l].list_phy_addr =
685 					tmp_p + (k * lst_size);
686 				k++;
687 			}
688 		}
689 	}
690 
691 	for (i = 0; i < config->tx_fifo_num; i++) {
692 		struct fifo_info *fifo = &mac_control->fifos[i];
693 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
694 
695 		size = tx_cfg->fifo_len;
696 		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
697 		if (!fifo->ufo_in_band_v)
698 			return -ENOMEM;
699 		mem_allocated += (size * sizeof(u64));
700 	}
701 
702 	/* Allocation and initialization of RXDs in Rings */
703 	size = 0;
704 	for (i = 0; i < config->rx_ring_num; i++) {
705 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
706 		struct ring_info *ring = &mac_control->rings[i];
707 
708 		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
709 			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
710 				  "multiple of RxDs per Block\n",
711 				  dev->name, i);
712 			return FAILURE;
713 		}
714 		size += rx_cfg->num_rxd;
715 		ring->block_count = rx_cfg->num_rxd /
716 			(rxd_count[nic->rxd_mode] + 1);
717 		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
718 	}
719 	if (nic->rxd_mode == RXD_MODE_1)
720 		size = (size * (sizeof(struct RxD1)));
721 	else
722 		size = (size * (sizeof(struct RxD3)));
723 
724 	for (i = 0; i < config->rx_ring_num; i++) {
725 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
726 		struct ring_info *ring = &mac_control->rings[i];
727 
728 		ring->rx_curr_get_info.block_index = 0;
729 		ring->rx_curr_get_info.offset = 0;
730 		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
731 		ring->rx_curr_put_info.block_index = 0;
732 		ring->rx_curr_put_info.offset = 0;
733 		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
734 		ring->nic = nic;
735 		ring->ring_no = i;
736 
737 		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
738 		/*  Allocating all the Rx blocks */
739 		for (j = 0; j < blk_cnt; j++) {
740 			struct rx_block_info *rx_blocks;
741 			int l;
742 
743 			rx_blocks = &ring->rx_blocks[j];
744 			size = SIZE_OF_BLOCK;	/* size is always page size */
745 			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
746 							  &tmp_p_addr);
747 			if (tmp_v_addr == NULL) {
748 				/*
749 				 * In case of failure, free_shared_mem()
750 				 * is called, which should free any
751 				 * memory that was alloced till the
752 				 * failure happened.
753 				 */
754 				rx_blocks->block_virt_addr = tmp_v_addr;
755 				return -ENOMEM;
756 			}
757 			mem_allocated += size;
758 			memset(tmp_v_addr, 0, size);
759 
760 			size = sizeof(struct rxd_info) *
761 				rxd_count[nic->rxd_mode];
762 			rx_blocks->block_virt_addr = tmp_v_addr;
763 			rx_blocks->block_dma_addr = tmp_p_addr;
764 			rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
765 			if (!rx_blocks->rxds)
766 				return -ENOMEM;
767 			mem_allocated += size;
768 			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
769 				rx_blocks->rxds[l].virt_addr =
770 					rx_blocks->block_virt_addr +
771 					(rxd_size[nic->rxd_mode] * l);
772 				rx_blocks->rxds[l].dma_addr =
773 					rx_blocks->block_dma_addr +
774 					(rxd_size[nic->rxd_mode] * l);
775 			}
776 		}
777 		/* Interlinking all Rx Blocks */
778 		for (j = 0; j < blk_cnt; j++) {
779 			int next = (j + 1) % blk_cnt;
780 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
781 			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
782 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
783 			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
784 
785 			pre_rxd_blk = tmp_v_addr;
786 			pre_rxd_blk->reserved_2_pNext_RxD_block =
787 				(unsigned long)tmp_v_addr_next;
788 			pre_rxd_blk->pNext_RxD_Blk_physical =
789 				(u64)tmp_p_addr_next;
790 		}
791 	}
792 	if (nic->rxd_mode == RXD_MODE_3B) {
793 		/*
794 		 * Allocation of Storages for buffer addresses in 2BUFF mode
795 		 * and the buffers as well.
796 		 */
797 		for (i = 0; i < config->rx_ring_num; i++) {
798 			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
799 			struct ring_info *ring = &mac_control->rings[i];
800 
801 			blk_cnt = rx_cfg->num_rxd /
802 				(rxd_count[nic->rxd_mode] + 1);
803 			size = sizeof(struct buffAdd *) * blk_cnt;
804 			ring->ba = kmalloc(size, GFP_KERNEL);
805 			if (!ring->ba)
806 				return -ENOMEM;
807 			mem_allocated += size;
808 			for (j = 0; j < blk_cnt; j++) {
809 				int k = 0;
810 
811 				size = sizeof(struct buffAdd) *
812 					(rxd_count[nic->rxd_mode] + 1);
813 				ring->ba[j] = kmalloc(size, GFP_KERNEL);
814 				if (!ring->ba[j])
815 					return -ENOMEM;
816 				mem_allocated += size;
817 				while (k != rxd_count[nic->rxd_mode]) {
818 					ba = &ring->ba[j][k];
819 					size = BUF0_LEN + ALIGN_SIZE;
820 					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
821 					if (!ba->ba_0_org)
822 						return -ENOMEM;
823 					mem_allocated += size;
824 					tmp = (unsigned long)ba->ba_0_org;
825 					tmp += ALIGN_SIZE;
826 					tmp &= ~((unsigned long)ALIGN_SIZE);
827 					ba->ba_0 = (void *)tmp;
828 
829 					size = BUF1_LEN + ALIGN_SIZE;
830 					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
831 					if (!ba->ba_1_org)
832 						return -ENOMEM;
833 					mem_allocated += size;
834 					tmp = (unsigned long)ba->ba_1_org;
835 					tmp += ALIGN_SIZE;
836 					tmp &= ~((unsigned long)ALIGN_SIZE);
837 					ba->ba_1 = (void *)tmp;
838 					k++;
839 				}
840 			}
841 		}
842 	}
843 
844 	/* Allocation and initialization of Statistics block */
845 	size = sizeof(struct stat_block);
846 	mac_control->stats_mem =
847 		pci_alloc_consistent(nic->pdev, size,
848 				     &mac_control->stats_mem_phy);
849 
850 	if (!mac_control->stats_mem) {
851 		/*
852 		 * In case of failure, free_shared_mem() is called, which
853 		 * should free any memory that was alloced till the
854 		 * failure happened.
855 		 */
856 		return -ENOMEM;
857 	}
858 	mem_allocated += size;
859 	mac_control->stats_mem_sz = size;
860 
861 	tmp_v_addr = mac_control->stats_mem;
862 	mac_control->stats_info = tmp_v_addr;
863 	memset(tmp_v_addr, 0, size);
864 	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
865 		dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
866 	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
867 	return SUCCESS;
868 }
869 
870 /**
871  * free_shared_mem - Free the allocated Memory
872  * @nic:  Device private variable.
873  * Description: This function is to free all memory locations allocated by
874  * the init_shared_mem() function and return it to the kernel.
875  */
876 
877 static void free_shared_mem(struct s2io_nic *nic)
878 {
879 	int i, j, blk_cnt, size;
880 	void *tmp_v_addr;
881 	dma_addr_t tmp_p_addr;
882 	int lst_size, lst_per_page;
883 	struct net_device *dev;
884 	int page_num = 0;
885 	struct config_param *config;
886 	struct mac_info *mac_control;
887 	struct stat_block *stats;
888 	struct swStat *swstats;
889 
890 	if (!nic)
891 		return;
892 
893 	dev = nic->dev;
894 
895 	config = &nic->config;
896 	mac_control = &nic->mac_control;
897 	stats = mac_control->stats_info;
898 	swstats = &stats->sw_stat;
899 
900 	lst_size = sizeof(struct TxD) * config->max_txds;
901 	lst_per_page = PAGE_SIZE / lst_size;
902 
903 	for (i = 0; i < config->tx_fifo_num; i++) {
904 		struct fifo_info *fifo = &mac_control->fifos[i];
905 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
906 
907 		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
908 		for (j = 0; j < page_num; j++) {
909 			int mem_blks = (j * lst_per_page);
910 			struct list_info_hold *fli;
911 
912 			if (!fifo->list_info)
913 				return;
914 
915 			fli = &fifo->list_info[mem_blks];
916 			if (!fli->list_virt_addr)
917 				break;
918 			pci_free_consistent(nic->pdev, PAGE_SIZE,
919 					    fli->list_virt_addr,
920 					    fli->list_phy_addr);
921 			swstats->mem_freed += PAGE_SIZE;
922 		}
923 		/* If we got a zero DMA address during allocation,
924 		 * free the page now
925 		 */
926 		if (mac_control->zerodma_virt_addr) {
927 			pci_free_consistent(nic->pdev, PAGE_SIZE,
928 					    mac_control->zerodma_virt_addr,
929 					    (dma_addr_t)0);
930 			DBG_PRINT(INIT_DBG,
931 				  "%s: Freeing TxDL with zero DMA address. "
932 				  "Virtual address %p\n",
933 				  dev->name, mac_control->zerodma_virt_addr);
934 			swstats->mem_freed += PAGE_SIZE;
935 		}
936 		kfree(fifo->list_info);
937 		swstats->mem_freed += tx_cfg->fifo_len *
938 			sizeof(struct list_info_hold);
939 	}
940 
941 	size = SIZE_OF_BLOCK;
942 	for (i = 0; i < config->rx_ring_num; i++) {
943 		struct ring_info *ring = &mac_control->rings[i];
944 
945 		blk_cnt = ring->block_count;
946 		for (j = 0; j < blk_cnt; j++) {
947 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
948 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
949 			if (tmp_v_addr == NULL)
950 				break;
951 			pci_free_consistent(nic->pdev, size,
952 					    tmp_v_addr, tmp_p_addr);
953 			swstats->mem_freed += size;
954 			kfree(ring->rx_blocks[j].rxds);
955 			swstats->mem_freed += sizeof(struct rxd_info) *
956 				rxd_count[nic->rxd_mode];
957 		}
958 	}
959 
960 	if (nic->rxd_mode == RXD_MODE_3B) {
961 		/* Freeing buffer storage addresses in 2BUFF mode. */
962 		for (i = 0; i < config->rx_ring_num; i++) {
963 			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
964 			struct ring_info *ring = &mac_control->rings[i];
965 
966 			blk_cnt = rx_cfg->num_rxd /
967 				(rxd_count[nic->rxd_mode] + 1);
968 			for (j = 0; j < blk_cnt; j++) {
969 				int k = 0;
970 				if (!ring->ba[j])
971 					continue;
972 				while (k != rxd_count[nic->rxd_mode]) {
973 					struct buffAdd *ba = &ring->ba[j][k];
974 					kfree(ba->ba_0_org);
975 					swstats->mem_freed +=
976 						BUF0_LEN + ALIGN_SIZE;
977 					kfree(ba->ba_1_org);
978 					swstats->mem_freed +=
979 						BUF1_LEN + ALIGN_SIZE;
980 					k++;
981 				}
982 				kfree(ring->ba[j]);
983 				swstats->mem_freed += sizeof(struct buffAdd) *
984 					(rxd_count[nic->rxd_mode] + 1);
985 			}
986 			kfree(ring->ba);
987 			swstats->mem_freed += sizeof(struct buffAdd *) *
988 				blk_cnt;
989 		}
990 	}
991 
992 	for (i = 0; i < nic->config.tx_fifo_num; i++) {
993 		struct fifo_info *fifo = &mac_control->fifos[i];
994 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
995 
996 		if (fifo->ufo_in_band_v) {
997 			swstats->mem_freed += tx_cfg->fifo_len *
998 				sizeof(u64);
999 			kfree(fifo->ufo_in_band_v);
1000 		}
1001 	}
1002 
1003 	if (mac_control->stats_mem) {
1004 		swstats->mem_freed += mac_control->stats_mem_sz;
1005 		pci_free_consistent(nic->pdev,
1006 				    mac_control->stats_mem_sz,
1007 				    mac_control->stats_mem,
1008 				    mac_control->stats_mem_phy);
1009 	}
1010 }
1011 
1012 /**
1013  * s2io_verify_pci_mode -
1014  */
1015 
1016 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1017 {
1018 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1019 	register u64 val64 = 0;
1020 	int     mode;
1021 
1022 	val64 = readq(&bar0->pci_mode);
1023 	mode = (u8)GET_PCI_MODE(val64);
1024 
1025 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1026 		return -1;      /* Unknown PCI mode */
1027 	return mode;
1028 }
1029 
1030 #define NEC_VENID   0x1033
1031 #define NEC_DEVID   0x0125
1032 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1033 {
1034 	struct pci_dev *tdev = NULL;
1035 	for_each_pci_dev(tdev) {
1036 		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1037 			if (tdev->bus == s2io_pdev->bus->parent) {
1038 				pci_dev_put(tdev);
1039 				return 1;
1040 			}
1041 		}
1042 	}
1043 	return 0;
1044 }
1045 
1046 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1047 /**
1048  * s2io_print_pci_mode -
1049  */
1050 static int s2io_print_pci_mode(struct s2io_nic *nic)
1051 {
1052 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1053 	register u64 val64 = 0;
1054 	int	mode;
1055 	struct config_param *config = &nic->config;
1056 	const char *pcimode;
1057 
1058 	val64 = readq(&bar0->pci_mode);
1059 	mode = (u8)GET_PCI_MODE(val64);
1060 
1061 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1062 		return -1;	/* Unknown PCI mode */
1063 
1064 	config->bus_speed = bus_speed[mode];
1065 
1066 	if (s2io_on_nec_bridge(nic->pdev)) {
1067 		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1068 			  nic->dev->name);
1069 		return mode;
1070 	}
1071 
1072 	switch (mode) {
1073 	case PCI_MODE_PCI_33:
1074 		pcimode = "33MHz PCI bus";
1075 		break;
1076 	case PCI_MODE_PCI_66:
1077 		pcimode = "66MHz PCI bus";
1078 		break;
1079 	case PCI_MODE_PCIX_M1_66:
1080 		pcimode = "66MHz PCIX(M1) bus";
1081 		break;
1082 	case PCI_MODE_PCIX_M1_100:
1083 		pcimode = "100MHz PCIX(M1) bus";
1084 		break;
1085 	case PCI_MODE_PCIX_M1_133:
1086 		pcimode = "133MHz PCIX(M1) bus";
1087 		break;
1088 	case PCI_MODE_PCIX_M2_66:
1089 		pcimode = "133MHz PCIX(M2) bus";
1090 		break;
1091 	case PCI_MODE_PCIX_M2_100:
1092 		pcimode = "200MHz PCIX(M2) bus";
1093 		break;
1094 	case PCI_MODE_PCIX_M2_133:
1095 		pcimode = "266MHz PCIX(M2) bus";
1096 		break;
1097 	default:
1098 		pcimode = "unsupported bus!";
1099 		mode = -1;
1100 	}
1101 
1102 	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1103 		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1104 
1105 	return mode;
1106 }
1107 
1108 /**
1109  *  init_tti - Initialization transmit traffic interrupt scheme
1110  *  @nic: device private variable
1111  *  @link: link status (UP/DOWN) used to enable/disable continuous
1112  *  transmit interrupts
1113  *  Description: The function configures transmit traffic interrupts
1114  *  Return Value:  SUCCESS on success and
1115  *  '-1' on failure
1116  */
1117 
1118 static int init_tti(struct s2io_nic *nic, int link)
1119 {
1120 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1121 	register u64 val64 = 0;
1122 	int i;
1123 	struct config_param *config = &nic->config;
1124 
1125 	for (i = 0; i < config->tx_fifo_num; i++) {
1126 		/*
1127 		 * TTI Initialization. Default Tx timer gets us about
1128 		 * 250 interrupts per sec. Continuous interrupts are enabled
1129 		 * by default.
1130 		 */
1131 		if (nic->device_type == XFRAME_II_DEVICE) {
1132 			int count = (nic->config.bus_speed * 125)/2;
1133 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1134 		} else
1135 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1136 
1137 		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1138 			TTI_DATA1_MEM_TX_URNG_B(0x10) |
1139 			TTI_DATA1_MEM_TX_URNG_C(0x30) |
1140 			TTI_DATA1_MEM_TX_TIMER_AC_EN;
1141 		if (i == 0)
1142 			if (use_continuous_tx_intrs && (link == LINK_UP))
1143 				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1144 		writeq(val64, &bar0->tti_data1_mem);
1145 
1146 		if (nic->config.intr_type == MSI_X) {
1147 			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1148 				TTI_DATA2_MEM_TX_UFC_B(0x100) |
1149 				TTI_DATA2_MEM_TX_UFC_C(0x200) |
1150 				TTI_DATA2_MEM_TX_UFC_D(0x300);
1151 		} else {
1152 			if ((nic->config.tx_steering_type ==
1153 			     TX_DEFAULT_STEERING) &&
1154 			    (config->tx_fifo_num > 1) &&
1155 			    (i >= nic->udp_fifo_idx) &&
1156 			    (i < (nic->udp_fifo_idx +
1157 				  nic->total_udp_fifos)))
1158 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1159 					TTI_DATA2_MEM_TX_UFC_B(0x80) |
1160 					TTI_DATA2_MEM_TX_UFC_C(0x100) |
1161 					TTI_DATA2_MEM_TX_UFC_D(0x120);
1162 			else
1163 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1164 					TTI_DATA2_MEM_TX_UFC_B(0x20) |
1165 					TTI_DATA2_MEM_TX_UFC_C(0x40) |
1166 					TTI_DATA2_MEM_TX_UFC_D(0x80);
1167 		}
1168 
1169 		writeq(val64, &bar0->tti_data2_mem);
1170 
1171 		val64 = TTI_CMD_MEM_WE |
1172 			TTI_CMD_MEM_STROBE_NEW_CMD |
1173 			TTI_CMD_MEM_OFFSET(i);
1174 		writeq(val64, &bar0->tti_command_mem);
1175 
1176 		if (wait_for_cmd_complete(&bar0->tti_command_mem,
1177 					  TTI_CMD_MEM_STROBE_NEW_CMD,
1178 					  S2IO_BIT_RESET) != SUCCESS)
1179 			return FAILURE;
1180 	}
1181 
1182 	return SUCCESS;
1183 }
1184 
1185 /**
1186  *  init_nic - Initialization of hardware
1187  *  @nic: device private variable
1188  *  Description: The function sequentially configures every block
1189  *  of the H/W from their reset values.
1190  *  Return Value:  SUCCESS on success and
1191  *  '-1' on failure (endian settings incorrect).
1192  */
1193 
1194 static int init_nic(struct s2io_nic *nic)
1195 {
1196 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1197 	struct net_device *dev = nic->dev;
1198 	register u64 val64 = 0;
1199 	void __iomem *add;
1200 	u32 time;
1201 	int i, j;
1202 	int dtx_cnt = 0;
1203 	unsigned long long mem_share;
1204 	int mem_size;
1205 	struct config_param *config = &nic->config;
1206 	struct mac_info *mac_control = &nic->mac_control;
1207 
1208 	/* to set the swapper controle on the card */
1209 	if (s2io_set_swapper(nic)) {
1210 		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1211 		return -EIO;
1212 	}
1213 
1214 	/*
1215 	 * Herc requires EOI to be removed from reset before XGXS, so..
1216 	 */
1217 	if (nic->device_type & XFRAME_II_DEVICE) {
1218 		val64 = 0xA500000000ULL;
1219 		writeq(val64, &bar0->sw_reset);
1220 		msleep(500);
1221 		val64 = readq(&bar0->sw_reset);
1222 	}
1223 
1224 	/* Remove XGXS from reset state */
1225 	val64 = 0;
1226 	writeq(val64, &bar0->sw_reset);
1227 	msleep(500);
1228 	val64 = readq(&bar0->sw_reset);
1229 
1230 	/* Ensure that it's safe to access registers by checking
1231 	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1232 	 */
1233 	if (nic->device_type == XFRAME_II_DEVICE) {
1234 		for (i = 0; i < 50; i++) {
1235 			val64 = readq(&bar0->adapter_status);
1236 			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1237 				break;
1238 			msleep(10);
1239 		}
1240 		if (i == 50)
1241 			return -ENODEV;
1242 	}
1243 
1244 	/*  Enable Receiving broadcasts */
1245 	add = &bar0->mac_cfg;
1246 	val64 = readq(&bar0->mac_cfg);
1247 	val64 |= MAC_RMAC_BCAST_ENABLE;
1248 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1249 	writel((u32)val64, add);
1250 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1251 	writel((u32) (val64 >> 32), (add + 4));
1252 
1253 	/* Read registers in all blocks */
1254 	val64 = readq(&bar0->mac_int_mask);
1255 	val64 = readq(&bar0->mc_int_mask);
1256 	val64 = readq(&bar0->xgxs_int_mask);
1257 
1258 	/*  Set MTU */
1259 	val64 = dev->mtu;
1260 	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1261 
1262 	if (nic->device_type & XFRAME_II_DEVICE) {
1263 		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1264 			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1265 					  &bar0->dtx_control, UF);
1266 			if (dtx_cnt & 0x1)
1267 				msleep(1); /* Necessary!! */
1268 			dtx_cnt++;
1269 		}
1270 	} else {
1271 		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1272 			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1273 					  &bar0->dtx_control, UF);
1274 			val64 = readq(&bar0->dtx_control);
1275 			dtx_cnt++;
1276 		}
1277 	}
1278 
1279 	/*  Tx DMA Initialization */
1280 	val64 = 0;
1281 	writeq(val64, &bar0->tx_fifo_partition_0);
1282 	writeq(val64, &bar0->tx_fifo_partition_1);
1283 	writeq(val64, &bar0->tx_fifo_partition_2);
1284 	writeq(val64, &bar0->tx_fifo_partition_3);
1285 
1286 	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1287 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1288 
1289 		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1290 			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1291 
1292 		if (i == (config->tx_fifo_num - 1)) {
1293 			if (i % 2 == 0)
1294 				i++;
1295 		}
1296 
1297 		switch (i) {
1298 		case 1:
1299 			writeq(val64, &bar0->tx_fifo_partition_0);
1300 			val64 = 0;
1301 			j = 0;
1302 			break;
1303 		case 3:
1304 			writeq(val64, &bar0->tx_fifo_partition_1);
1305 			val64 = 0;
1306 			j = 0;
1307 			break;
1308 		case 5:
1309 			writeq(val64, &bar0->tx_fifo_partition_2);
1310 			val64 = 0;
1311 			j = 0;
1312 			break;
1313 		case 7:
1314 			writeq(val64, &bar0->tx_fifo_partition_3);
1315 			val64 = 0;
1316 			j = 0;
1317 			break;
1318 		default:
1319 			j++;
1320 			break;
1321 		}
1322 	}
1323 
1324 	/*
1325 	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1326 	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1327 	 */
1328 	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1329 		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1330 
1331 	val64 = readq(&bar0->tx_fifo_partition_0);
1332 	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1333 		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1334 
1335 	/*
1336 	 * Initialization of Tx_PA_CONFIG register to ignore packet
1337 	 * integrity checking.
1338 	 */
1339 	val64 = readq(&bar0->tx_pa_cfg);
1340 	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1341 		TX_PA_CFG_IGNORE_SNAP_OUI |
1342 		TX_PA_CFG_IGNORE_LLC_CTRL |
1343 		TX_PA_CFG_IGNORE_L2_ERR;
1344 	writeq(val64, &bar0->tx_pa_cfg);
1345 
1346 	/* Rx DMA initialization. */
1347 	val64 = 0;
1348 	for (i = 0; i < config->rx_ring_num; i++) {
1349 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1350 
1351 		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1352 	}
1353 	writeq(val64, &bar0->rx_queue_priority);
1354 
1355 	/*
1356 	 * Allocating equal share of memory to all the
1357 	 * configured Rings.
1358 	 */
1359 	val64 = 0;
1360 	if (nic->device_type & XFRAME_II_DEVICE)
1361 		mem_size = 32;
1362 	else
1363 		mem_size = 64;
1364 
1365 	for (i = 0; i < config->rx_ring_num; i++) {
1366 		switch (i) {
1367 		case 0:
1368 			mem_share = (mem_size / config->rx_ring_num +
1369 				     mem_size % config->rx_ring_num);
1370 			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1371 			continue;
1372 		case 1:
1373 			mem_share = (mem_size / config->rx_ring_num);
1374 			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1375 			continue;
1376 		case 2:
1377 			mem_share = (mem_size / config->rx_ring_num);
1378 			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1379 			continue;
1380 		case 3:
1381 			mem_share = (mem_size / config->rx_ring_num);
1382 			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1383 			continue;
1384 		case 4:
1385 			mem_share = (mem_size / config->rx_ring_num);
1386 			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1387 			continue;
1388 		case 5:
1389 			mem_share = (mem_size / config->rx_ring_num);
1390 			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1391 			continue;
1392 		case 6:
1393 			mem_share = (mem_size / config->rx_ring_num);
1394 			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1395 			continue;
1396 		case 7:
1397 			mem_share = (mem_size / config->rx_ring_num);
1398 			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1399 			continue;
1400 		}
1401 	}
1402 	writeq(val64, &bar0->rx_queue_cfg);
1403 
1404 	/*
1405 	 * Filling Tx round robin registers
1406 	 * as per the number of FIFOs for equal scheduling priority
1407 	 */
1408 	switch (config->tx_fifo_num) {
1409 	case 1:
1410 		val64 = 0x0;
1411 		writeq(val64, &bar0->tx_w_round_robin_0);
1412 		writeq(val64, &bar0->tx_w_round_robin_1);
1413 		writeq(val64, &bar0->tx_w_round_robin_2);
1414 		writeq(val64, &bar0->tx_w_round_robin_3);
1415 		writeq(val64, &bar0->tx_w_round_robin_4);
1416 		break;
1417 	case 2:
1418 		val64 = 0x0001000100010001ULL;
1419 		writeq(val64, &bar0->tx_w_round_robin_0);
1420 		writeq(val64, &bar0->tx_w_round_robin_1);
1421 		writeq(val64, &bar0->tx_w_round_robin_2);
1422 		writeq(val64, &bar0->tx_w_round_robin_3);
1423 		val64 = 0x0001000100000000ULL;
1424 		writeq(val64, &bar0->tx_w_round_robin_4);
1425 		break;
1426 	case 3:
1427 		val64 = 0x0001020001020001ULL;
1428 		writeq(val64, &bar0->tx_w_round_robin_0);
1429 		val64 = 0x0200010200010200ULL;
1430 		writeq(val64, &bar0->tx_w_round_robin_1);
1431 		val64 = 0x0102000102000102ULL;
1432 		writeq(val64, &bar0->tx_w_round_robin_2);
1433 		val64 = 0x0001020001020001ULL;
1434 		writeq(val64, &bar0->tx_w_round_robin_3);
1435 		val64 = 0x0200010200000000ULL;
1436 		writeq(val64, &bar0->tx_w_round_robin_4);
1437 		break;
1438 	case 4:
1439 		val64 = 0x0001020300010203ULL;
1440 		writeq(val64, &bar0->tx_w_round_robin_0);
1441 		writeq(val64, &bar0->tx_w_round_robin_1);
1442 		writeq(val64, &bar0->tx_w_round_robin_2);
1443 		writeq(val64, &bar0->tx_w_round_robin_3);
1444 		val64 = 0x0001020300000000ULL;
1445 		writeq(val64, &bar0->tx_w_round_robin_4);
1446 		break;
1447 	case 5:
1448 		val64 = 0x0001020304000102ULL;
1449 		writeq(val64, &bar0->tx_w_round_robin_0);
1450 		val64 = 0x0304000102030400ULL;
1451 		writeq(val64, &bar0->tx_w_round_robin_1);
1452 		val64 = 0x0102030400010203ULL;
1453 		writeq(val64, &bar0->tx_w_round_robin_2);
1454 		val64 = 0x0400010203040001ULL;
1455 		writeq(val64, &bar0->tx_w_round_robin_3);
1456 		val64 = 0x0203040000000000ULL;
1457 		writeq(val64, &bar0->tx_w_round_robin_4);
1458 		break;
1459 	case 6:
1460 		val64 = 0x0001020304050001ULL;
1461 		writeq(val64, &bar0->tx_w_round_robin_0);
1462 		val64 = 0x0203040500010203ULL;
1463 		writeq(val64, &bar0->tx_w_round_robin_1);
1464 		val64 = 0x0405000102030405ULL;
1465 		writeq(val64, &bar0->tx_w_round_robin_2);
1466 		val64 = 0x0001020304050001ULL;
1467 		writeq(val64, &bar0->tx_w_round_robin_3);
1468 		val64 = 0x0203040500000000ULL;
1469 		writeq(val64, &bar0->tx_w_round_robin_4);
1470 		break;
1471 	case 7:
1472 		val64 = 0x0001020304050600ULL;
1473 		writeq(val64, &bar0->tx_w_round_robin_0);
1474 		val64 = 0x0102030405060001ULL;
1475 		writeq(val64, &bar0->tx_w_round_robin_1);
1476 		val64 = 0x0203040506000102ULL;
1477 		writeq(val64, &bar0->tx_w_round_robin_2);
1478 		val64 = 0x0304050600010203ULL;
1479 		writeq(val64, &bar0->tx_w_round_robin_3);
1480 		val64 = 0x0405060000000000ULL;
1481 		writeq(val64, &bar0->tx_w_round_robin_4);
1482 		break;
1483 	case 8:
1484 		val64 = 0x0001020304050607ULL;
1485 		writeq(val64, &bar0->tx_w_round_robin_0);
1486 		writeq(val64, &bar0->tx_w_round_robin_1);
1487 		writeq(val64, &bar0->tx_w_round_robin_2);
1488 		writeq(val64, &bar0->tx_w_round_robin_3);
1489 		val64 = 0x0001020300000000ULL;
1490 		writeq(val64, &bar0->tx_w_round_robin_4);
1491 		break;
1492 	}
1493 
1494 	/* Enable all configured Tx FIFO partitions */
1495 	val64 = readq(&bar0->tx_fifo_partition_0);
1496 	val64 |= (TX_FIFO_PARTITION_EN);
1497 	writeq(val64, &bar0->tx_fifo_partition_0);
1498 
1499 	/* Filling the Rx round robin registers as per the
1500 	 * number of Rings and steering based on QoS with
1501 	 * equal priority.
1502 	 */
1503 	switch (config->rx_ring_num) {
1504 	case 1:
1505 		val64 = 0x0;
1506 		writeq(val64, &bar0->rx_w_round_robin_0);
1507 		writeq(val64, &bar0->rx_w_round_robin_1);
1508 		writeq(val64, &bar0->rx_w_round_robin_2);
1509 		writeq(val64, &bar0->rx_w_round_robin_3);
1510 		writeq(val64, &bar0->rx_w_round_robin_4);
1511 
1512 		val64 = 0x8080808080808080ULL;
1513 		writeq(val64, &bar0->rts_qos_steering);
1514 		break;
1515 	case 2:
1516 		val64 = 0x0001000100010001ULL;
1517 		writeq(val64, &bar0->rx_w_round_robin_0);
1518 		writeq(val64, &bar0->rx_w_round_robin_1);
1519 		writeq(val64, &bar0->rx_w_round_robin_2);
1520 		writeq(val64, &bar0->rx_w_round_robin_3);
1521 		val64 = 0x0001000100000000ULL;
1522 		writeq(val64, &bar0->rx_w_round_robin_4);
1523 
1524 		val64 = 0x8080808040404040ULL;
1525 		writeq(val64, &bar0->rts_qos_steering);
1526 		break;
1527 	case 3:
1528 		val64 = 0x0001020001020001ULL;
1529 		writeq(val64, &bar0->rx_w_round_robin_0);
1530 		val64 = 0x0200010200010200ULL;
1531 		writeq(val64, &bar0->rx_w_round_robin_1);
1532 		val64 = 0x0102000102000102ULL;
1533 		writeq(val64, &bar0->rx_w_round_robin_2);
1534 		val64 = 0x0001020001020001ULL;
1535 		writeq(val64, &bar0->rx_w_round_robin_3);
1536 		val64 = 0x0200010200000000ULL;
1537 		writeq(val64, &bar0->rx_w_round_robin_4);
1538 
1539 		val64 = 0x8080804040402020ULL;
1540 		writeq(val64, &bar0->rts_qos_steering);
1541 		break;
1542 	case 4:
1543 		val64 = 0x0001020300010203ULL;
1544 		writeq(val64, &bar0->rx_w_round_robin_0);
1545 		writeq(val64, &bar0->rx_w_round_robin_1);
1546 		writeq(val64, &bar0->rx_w_round_robin_2);
1547 		writeq(val64, &bar0->rx_w_round_robin_3);
1548 		val64 = 0x0001020300000000ULL;
1549 		writeq(val64, &bar0->rx_w_round_robin_4);
1550 
1551 		val64 = 0x8080404020201010ULL;
1552 		writeq(val64, &bar0->rts_qos_steering);
1553 		break;
1554 	case 5:
1555 		val64 = 0x0001020304000102ULL;
1556 		writeq(val64, &bar0->rx_w_round_robin_0);
1557 		val64 = 0x0304000102030400ULL;
1558 		writeq(val64, &bar0->rx_w_round_robin_1);
1559 		val64 = 0x0102030400010203ULL;
1560 		writeq(val64, &bar0->rx_w_round_robin_2);
1561 		val64 = 0x0400010203040001ULL;
1562 		writeq(val64, &bar0->rx_w_round_robin_3);
1563 		val64 = 0x0203040000000000ULL;
1564 		writeq(val64, &bar0->rx_w_round_robin_4);
1565 
1566 		val64 = 0x8080404020201008ULL;
1567 		writeq(val64, &bar0->rts_qos_steering);
1568 		break;
1569 	case 6:
1570 		val64 = 0x0001020304050001ULL;
1571 		writeq(val64, &bar0->rx_w_round_robin_0);
1572 		val64 = 0x0203040500010203ULL;
1573 		writeq(val64, &bar0->rx_w_round_robin_1);
1574 		val64 = 0x0405000102030405ULL;
1575 		writeq(val64, &bar0->rx_w_round_robin_2);
1576 		val64 = 0x0001020304050001ULL;
1577 		writeq(val64, &bar0->rx_w_round_robin_3);
1578 		val64 = 0x0203040500000000ULL;
1579 		writeq(val64, &bar0->rx_w_round_robin_4);
1580 
1581 		val64 = 0x8080404020100804ULL;
1582 		writeq(val64, &bar0->rts_qos_steering);
1583 		break;
1584 	case 7:
1585 		val64 = 0x0001020304050600ULL;
1586 		writeq(val64, &bar0->rx_w_round_robin_0);
1587 		val64 = 0x0102030405060001ULL;
1588 		writeq(val64, &bar0->rx_w_round_robin_1);
1589 		val64 = 0x0203040506000102ULL;
1590 		writeq(val64, &bar0->rx_w_round_robin_2);
1591 		val64 = 0x0304050600010203ULL;
1592 		writeq(val64, &bar0->rx_w_round_robin_3);
1593 		val64 = 0x0405060000000000ULL;
1594 		writeq(val64, &bar0->rx_w_round_robin_4);
1595 
1596 		val64 = 0x8080402010080402ULL;
1597 		writeq(val64, &bar0->rts_qos_steering);
1598 		break;
1599 	case 8:
1600 		val64 = 0x0001020304050607ULL;
1601 		writeq(val64, &bar0->rx_w_round_robin_0);
1602 		writeq(val64, &bar0->rx_w_round_robin_1);
1603 		writeq(val64, &bar0->rx_w_round_robin_2);
1604 		writeq(val64, &bar0->rx_w_round_robin_3);
1605 		val64 = 0x0001020300000000ULL;
1606 		writeq(val64, &bar0->rx_w_round_robin_4);
1607 
1608 		val64 = 0x8040201008040201ULL;
1609 		writeq(val64, &bar0->rts_qos_steering);
1610 		break;
1611 	}
1612 
1613 	/* UDP Fix */
1614 	val64 = 0;
1615 	for (i = 0; i < 8; i++)
1616 		writeq(val64, &bar0->rts_frm_len_n[i]);
1617 
1618 	/* Set the default rts frame length for the rings configured */
1619 	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1620 	for (i = 0 ; i < config->rx_ring_num ; i++)
1621 		writeq(val64, &bar0->rts_frm_len_n[i]);
1622 
1623 	/* Set the frame length for the configured rings
1624 	 * desired by the user
1625 	 */
1626 	for (i = 0; i < config->rx_ring_num; i++) {
1627 		/* If rts_frm_len[i] == 0 then it is assumed that user not
1628 		 * specified frame length steering.
1629 		 * If the user provides the frame length then program
1630 		 * the rts_frm_len register for those values or else
1631 		 * leave it as it is.
1632 		 */
1633 		if (rts_frm_len[i] != 0) {
1634 			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1635 			       &bar0->rts_frm_len_n[i]);
1636 		}
1637 	}
1638 
1639 	/* Disable differentiated services steering logic */
1640 	for (i = 0; i < 64; i++) {
1641 		if (rts_ds_steer(nic, i, 0) == FAILURE) {
1642 			DBG_PRINT(ERR_DBG,
1643 				  "%s: rts_ds_steer failed on codepoint %d\n",
1644 				  dev->name, i);
1645 			return -ENODEV;
1646 		}
1647 	}
1648 
1649 	/* Program statistics memory */
1650 	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1651 
1652 	if (nic->device_type == XFRAME_II_DEVICE) {
1653 		val64 = STAT_BC(0x320);
1654 		writeq(val64, &bar0->stat_byte_cnt);
1655 	}
1656 
1657 	/*
1658 	 * Initializing the sampling rate for the device to calculate the
1659 	 * bandwidth utilization.
1660 	 */
1661 	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1662 		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1663 	writeq(val64, &bar0->mac_link_util);
1664 
1665 	/*
1666 	 * Initializing the Transmit and Receive Traffic Interrupt
1667 	 * Scheme.
1668 	 */
1669 
1670 	/* Initialize TTI */
1671 	if (SUCCESS != init_tti(nic, nic->last_link_state))
1672 		return -ENODEV;
1673 
1674 	/* RTI Initialization */
1675 	if (nic->device_type == XFRAME_II_DEVICE) {
1676 		/*
1677 		 * Programmed to generate Apprx 500 Intrs per
1678 		 * second
1679 		 */
1680 		int count = (nic->config.bus_speed * 125)/4;
1681 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1682 	} else
1683 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1684 	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1685 		RTI_DATA1_MEM_RX_URNG_B(0x10) |
1686 		RTI_DATA1_MEM_RX_URNG_C(0x30) |
1687 		RTI_DATA1_MEM_RX_TIMER_AC_EN;
1688 
1689 	writeq(val64, &bar0->rti_data1_mem);
1690 
1691 	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1692 		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1693 	if (nic->config.intr_type == MSI_X)
1694 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1695 			  RTI_DATA2_MEM_RX_UFC_D(0x40));
1696 	else
1697 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1698 			  RTI_DATA2_MEM_RX_UFC_D(0x80));
1699 	writeq(val64, &bar0->rti_data2_mem);
1700 
1701 	for (i = 0; i < config->rx_ring_num; i++) {
1702 		val64 = RTI_CMD_MEM_WE |
1703 			RTI_CMD_MEM_STROBE_NEW_CMD |
1704 			RTI_CMD_MEM_OFFSET(i);
1705 		writeq(val64, &bar0->rti_command_mem);
1706 
1707 		/*
1708 		 * Once the operation completes, the Strobe bit of the
1709 		 * command register will be reset. We poll for this
1710 		 * particular condition. We wait for a maximum of 500ms
1711 		 * for the operation to complete, if it's not complete
1712 		 * by then we return error.
1713 		 */
1714 		time = 0;
1715 		while (true) {
1716 			val64 = readq(&bar0->rti_command_mem);
1717 			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1718 				break;
1719 
1720 			if (time > 10) {
1721 				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1722 					  dev->name);
1723 				return -ENODEV;
1724 			}
1725 			time++;
1726 			msleep(50);
1727 		}
1728 	}
1729 
1730 	/*
1731 	 * Initializing proper values as Pause threshold into all
1732 	 * the 8 Queues on Rx side.
1733 	 */
1734 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1735 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1736 
1737 	/* Disable RMAC PAD STRIPPING */
1738 	add = &bar0->mac_cfg;
1739 	val64 = readq(&bar0->mac_cfg);
1740 	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1741 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1742 	writel((u32) (val64), add);
1743 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1744 	writel((u32) (val64 >> 32), (add + 4));
1745 	val64 = readq(&bar0->mac_cfg);
1746 
1747 	/* Enable FCS stripping by adapter */
1748 	add = &bar0->mac_cfg;
1749 	val64 = readq(&bar0->mac_cfg);
1750 	val64 |= MAC_CFG_RMAC_STRIP_FCS;
1751 	if (nic->device_type == XFRAME_II_DEVICE)
1752 		writeq(val64, &bar0->mac_cfg);
1753 	else {
1754 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1755 		writel((u32) (val64), add);
1756 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1757 		writel((u32) (val64 >> 32), (add + 4));
1758 	}
1759 
1760 	/*
1761 	 * Set the time value to be inserted in the pause frame
1762 	 * generated by xena.
1763 	 */
1764 	val64 = readq(&bar0->rmac_pause_cfg);
1765 	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1766 	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1767 	writeq(val64, &bar0->rmac_pause_cfg);
1768 
1769 	/*
1770 	 * Set the Threshold Limit for Generating the pause frame
1771 	 * If the amount of data in any Queue exceeds ratio of
1772 	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1773 	 * pause frame is generated
1774 	 */
1775 	val64 = 0;
1776 	for (i = 0; i < 4; i++) {
1777 		val64 |= (((u64)0xFF00 |
1778 			   nic->mac_control.mc_pause_threshold_q0q3)
1779 			  << (i * 2 * 8));
1780 	}
1781 	writeq(val64, &bar0->mc_pause_thresh_q0q3);
1782 
1783 	val64 = 0;
1784 	for (i = 0; i < 4; i++) {
1785 		val64 |= (((u64)0xFF00 |
1786 			   nic->mac_control.mc_pause_threshold_q4q7)
1787 			  << (i * 2 * 8));
1788 	}
1789 	writeq(val64, &bar0->mc_pause_thresh_q4q7);
1790 
1791 	/*
1792 	 * TxDMA will stop Read request if the number of read split has
1793 	 * exceeded the limit pointed by shared_splits
1794 	 */
1795 	val64 = readq(&bar0->pic_control);
1796 	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1797 	writeq(val64, &bar0->pic_control);
1798 
1799 	if (nic->config.bus_speed == 266) {
1800 		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1801 		writeq(0x0, &bar0->read_retry_delay);
1802 		writeq(0x0, &bar0->write_retry_delay);
1803 	}
1804 
1805 	/*
1806 	 * Programming the Herc to split every write transaction
1807 	 * that does not start on an ADB to reduce disconnects.
1808 	 */
1809 	if (nic->device_type == XFRAME_II_DEVICE) {
1810 		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1811 			MISC_LINK_STABILITY_PRD(3);
1812 		writeq(val64, &bar0->misc_control);
1813 		val64 = readq(&bar0->pic_control2);
1814 		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1815 		writeq(val64, &bar0->pic_control2);
1816 	}
1817 	if (strstr(nic->product_name, "CX4")) {
1818 		val64 = TMAC_AVG_IPG(0x17);
1819 		writeq(val64, &bar0->tmac_avg_ipg);
1820 	}
1821 
1822 	return SUCCESS;
1823 }
1824 #define LINK_UP_DOWN_INTERRUPT		1
1825 #define MAC_RMAC_ERR_TIMER		2
1826 
1827 static int s2io_link_fault_indication(struct s2io_nic *nic)
1828 {
1829 	if (nic->device_type == XFRAME_II_DEVICE)
1830 		return LINK_UP_DOWN_INTERRUPT;
1831 	else
1832 		return MAC_RMAC_ERR_TIMER;
1833 }
1834 
1835 /**
1836  *  do_s2io_write_bits -  update alarm bits in alarm register
1837  *  @value: alarm bits
1838  *  @flag: interrupt status
1839  *  @addr: address value
1840  *  Description: update alarm bits in alarm register
1841  *  Return Value:
1842  *  NONE.
1843  */
1844 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1845 {
1846 	u64 temp64;
1847 
1848 	temp64 = readq(addr);
1849 
1850 	if (flag == ENABLE_INTRS)
1851 		temp64 &= ~((u64)value);
1852 	else
1853 		temp64 |= ((u64)value);
1854 	writeq(temp64, addr);
1855 }
1856 
1857 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1858 {
1859 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1860 	register u64 gen_int_mask = 0;
1861 	u64 interruptible;
1862 
1863 	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1864 	if (mask & TX_DMA_INTR) {
1865 		gen_int_mask |= TXDMA_INT_M;
1866 
1867 		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1868 				   TXDMA_PCC_INT | TXDMA_TTI_INT |
1869 				   TXDMA_LSO_INT | TXDMA_TPA_INT |
1870 				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1871 
1872 		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1873 				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1874 				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1875 				   &bar0->pfc_err_mask);
1876 
1877 		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1878 				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1879 				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1880 
1881 		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1882 				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1883 				   PCC_N_SERR | PCC_6_COF_OV_ERR |
1884 				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1885 				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1886 				   PCC_TXB_ECC_SG_ERR,
1887 				   flag, &bar0->pcc_err_mask);
1888 
1889 		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1890 				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1891 
1892 		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1893 				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1894 				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1895 				   flag, &bar0->lso_err_mask);
1896 
1897 		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1898 				   flag, &bar0->tpa_err_mask);
1899 
1900 		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1901 	}
1902 
1903 	if (mask & TX_MAC_INTR) {
1904 		gen_int_mask |= TXMAC_INT_M;
1905 		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1906 				   &bar0->mac_int_mask);
1907 		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1908 				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1909 				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1910 				   flag, &bar0->mac_tmac_err_mask);
1911 	}
1912 
1913 	if (mask & TX_XGXS_INTR) {
1914 		gen_int_mask |= TXXGXS_INT_M;
1915 		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1916 				   &bar0->xgxs_int_mask);
1917 		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1918 				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1919 				   flag, &bar0->xgxs_txgxs_err_mask);
1920 	}
1921 
1922 	if (mask & RX_DMA_INTR) {
1923 		gen_int_mask |= RXDMA_INT_M;
1924 		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1925 				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1926 				   flag, &bar0->rxdma_int_mask);
1927 		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1928 				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1929 				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1930 				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1931 		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1932 				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1933 				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1934 				   &bar0->prc_pcix_err_mask);
1935 		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1936 				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1937 				   &bar0->rpa_err_mask);
1938 		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1939 				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1940 				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1941 				   RDA_FRM_ECC_SG_ERR |
1942 				   RDA_MISC_ERR|RDA_PCIX_ERR,
1943 				   flag, &bar0->rda_err_mask);
1944 		do_s2io_write_bits(RTI_SM_ERR_ALARM |
1945 				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1946 				   flag, &bar0->rti_err_mask);
1947 	}
1948 
1949 	if (mask & RX_MAC_INTR) {
1950 		gen_int_mask |= RXMAC_INT_M;
1951 		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1952 				   &bar0->mac_int_mask);
1953 		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1954 				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1955 				 RMAC_DOUBLE_ECC_ERR);
1956 		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
1957 			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
1958 		do_s2io_write_bits(interruptible,
1959 				   flag, &bar0->mac_rmac_err_mask);
1960 	}
1961 
1962 	if (mask & RX_XGXS_INTR) {
1963 		gen_int_mask |= RXXGXS_INT_M;
1964 		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1965 				   &bar0->xgxs_int_mask);
1966 		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1967 				   &bar0->xgxs_rxgxs_err_mask);
1968 	}
1969 
1970 	if (mask & MC_INTR) {
1971 		gen_int_mask |= MC_INT_M;
1972 		do_s2io_write_bits(MC_INT_MASK_MC_INT,
1973 				   flag, &bar0->mc_int_mask);
1974 		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1975 				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1976 				   &bar0->mc_err_mask);
1977 	}
1978 	nic->general_int_mask = gen_int_mask;
1979 
1980 	/* Remove this line when alarm interrupts are enabled */
1981 	nic->general_int_mask = 0;
1982 }
1983 
1984 /**
1985  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1986  *  @nic: device private variable,
1987  *  @mask: A mask indicating which Intr block must be modified and,
1988  *  @flag: A flag indicating whether to enable or disable the Intrs.
1989  *  Description: This function will either disable or enable the interrupts
1990  *  depending on the flag argument. The mask argument can be used to
1991  *  enable/disable any Intr block.
1992  *  Return Value: NONE.
1993  */
1994 
1995 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1996 {
1997 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1998 	register u64 temp64 = 0, intr_mask = 0;
1999 
2000 	intr_mask = nic->general_int_mask;
2001 
2002 	/*  Top level interrupt classification */
2003 	/*  PIC Interrupts */
2004 	if (mask & TX_PIC_INTR) {
2005 		/*  Enable PIC Intrs in the general intr mask register */
2006 		intr_mask |= TXPIC_INT_M;
2007 		if (flag == ENABLE_INTRS) {
2008 			/*
2009 			 * If Hercules adapter enable GPIO otherwise
2010 			 * disable all PCIX, Flash, MDIO, IIC and GPIO
2011 			 * interrupts for now.
2012 			 * TODO
2013 			 */
2014 			if (s2io_link_fault_indication(nic) ==
2015 			    LINK_UP_DOWN_INTERRUPT) {
2016 				do_s2io_write_bits(PIC_INT_GPIO, flag,
2017 						   &bar0->pic_int_mask);
2018 				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2019 						   &bar0->gpio_int_mask);
2020 			} else
2021 				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2022 		} else if (flag == DISABLE_INTRS) {
2023 			/*
2024 			 * Disable PIC Intrs in the general
2025 			 * intr mask register
2026 			 */
2027 			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2028 		}
2029 	}
2030 
2031 	/*  Tx traffic interrupts */
2032 	if (mask & TX_TRAFFIC_INTR) {
2033 		intr_mask |= TXTRAFFIC_INT_M;
2034 		if (flag == ENABLE_INTRS) {
2035 			/*
2036 			 * Enable all the Tx side interrupts
2037 			 * writing 0 Enables all 64 TX interrupt levels
2038 			 */
2039 			writeq(0x0, &bar0->tx_traffic_mask);
2040 		} else if (flag == DISABLE_INTRS) {
2041 			/*
2042 			 * Disable Tx Traffic Intrs in the general intr mask
2043 			 * register.
2044 			 */
2045 			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2046 		}
2047 	}
2048 
2049 	/*  Rx traffic interrupts */
2050 	if (mask & RX_TRAFFIC_INTR) {
2051 		intr_mask |= RXTRAFFIC_INT_M;
2052 		if (flag == ENABLE_INTRS) {
2053 			/* writing 0 Enables all 8 RX interrupt levels */
2054 			writeq(0x0, &bar0->rx_traffic_mask);
2055 		} else if (flag == DISABLE_INTRS) {
2056 			/*
2057 			 * Disable Rx Traffic Intrs in the general intr mask
2058 			 * register.
2059 			 */
2060 			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2061 		}
2062 	}
2063 
2064 	temp64 = readq(&bar0->general_int_mask);
2065 	if (flag == ENABLE_INTRS)
2066 		temp64 &= ~((u64)intr_mask);
2067 	else
2068 		temp64 = DISABLE_ALL_INTRS;
2069 	writeq(temp64, &bar0->general_int_mask);
2070 
2071 	nic->general_int_mask = readq(&bar0->general_int_mask);
2072 }
2073 
2074 /**
2075  *  verify_pcc_quiescent- Checks for PCC quiescent state
2076  *  Return: 1 If PCC is quiescence
2077  *          0 If PCC is not quiescence
2078  */
2079 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2080 {
2081 	int ret = 0, herc;
2082 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2083 	u64 val64 = readq(&bar0->adapter_status);
2084 
2085 	herc = (sp->device_type == XFRAME_II_DEVICE);
2086 
2087 	if (flag == false) {
2088 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2089 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2090 				ret = 1;
2091 		} else {
2092 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2093 				ret = 1;
2094 		}
2095 	} else {
2096 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2097 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2098 			     ADAPTER_STATUS_RMAC_PCC_IDLE))
2099 				ret = 1;
2100 		} else {
2101 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2102 			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2103 				ret = 1;
2104 		}
2105 	}
2106 
2107 	return ret;
2108 }
2109 /**
2110  *  verify_xena_quiescence - Checks whether the H/W is ready
2111  *  Description: Returns whether the H/W is ready to go or not. Depending
2112  *  on whether adapter enable bit was written or not the comparison
2113  *  differs and the calling function passes the input argument flag to
2114  *  indicate this.
2115  *  Return: 1 If xena is quiescence
2116  *          0 If Xena is not quiescence
2117  */
2118 
2119 static int verify_xena_quiescence(struct s2io_nic *sp)
2120 {
2121 	int  mode;
2122 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2123 	u64 val64 = readq(&bar0->adapter_status);
2124 	mode = s2io_verify_pci_mode(sp);
2125 
2126 	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2127 		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2128 		return 0;
2129 	}
2130 	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2131 		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2132 		return 0;
2133 	}
2134 	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2135 		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2136 		return 0;
2137 	}
2138 	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2139 		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2140 		return 0;
2141 	}
2142 	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2143 		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2144 		return 0;
2145 	}
2146 	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2147 		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2148 		return 0;
2149 	}
2150 	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2151 		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2152 		return 0;
2153 	}
2154 	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2155 		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2156 		return 0;
2157 	}
2158 
2159 	/*
2160 	 * In PCI 33 mode, the P_PLL is not used, and therefore,
2161 	 * the the P_PLL_LOCK bit in the adapter_status register will
2162 	 * not be asserted.
2163 	 */
2164 	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2165 	    sp->device_type == XFRAME_II_DEVICE &&
2166 	    mode != PCI_MODE_PCI_33) {
2167 		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2168 		return 0;
2169 	}
2170 	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2171 	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2172 		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2173 		return 0;
2174 	}
2175 	return 1;
2176 }
2177 
2178 /**
2179  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2180  * @sp: Pointer to device specifc structure
2181  * Description :
2182  * New procedure to clear mac address reading  problems on Alpha platforms
2183  *
2184  */
2185 
2186 static void fix_mac_address(struct s2io_nic *sp)
2187 {
2188 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2189 	int i = 0;
2190 
2191 	while (fix_mac[i] != END_SIGN) {
2192 		writeq(fix_mac[i++], &bar0->gpio_control);
2193 		udelay(10);
2194 		(void) readq(&bar0->gpio_control);
2195 	}
2196 }
2197 
2198 /**
2199  *  start_nic - Turns the device on
2200  *  @nic : device private variable.
2201  *  Description:
2202  *  This function actually turns the device on. Before this  function is
2203  *  called,all Registers are configured from their reset states
2204  *  and shared memory is allocated but the NIC is still quiescent. On
2205  *  calling this function, the device interrupts are cleared and the NIC is
2206  *  literally switched on by writing into the adapter control register.
2207  *  Return Value:
2208  *  SUCCESS on success and -1 on failure.
2209  */
2210 
2211 static int start_nic(struct s2io_nic *nic)
2212 {
2213 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2214 	struct net_device *dev = nic->dev;
2215 	register u64 val64 = 0;
2216 	u16 subid, i;
2217 	struct config_param *config = &nic->config;
2218 	struct mac_info *mac_control = &nic->mac_control;
2219 
2220 	/*  PRC Initialization and configuration */
2221 	for (i = 0; i < config->rx_ring_num; i++) {
2222 		struct ring_info *ring = &mac_control->rings[i];
2223 
2224 		writeq((u64)ring->rx_blocks[0].block_dma_addr,
2225 		       &bar0->prc_rxd0_n[i]);
2226 
2227 		val64 = readq(&bar0->prc_ctrl_n[i]);
2228 		if (nic->rxd_mode == RXD_MODE_1)
2229 			val64 |= PRC_CTRL_RC_ENABLED;
2230 		else
2231 			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2232 		if (nic->device_type == XFRAME_II_DEVICE)
2233 			val64 |= PRC_CTRL_GROUP_READS;
2234 		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2235 		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2236 		writeq(val64, &bar0->prc_ctrl_n[i]);
2237 	}
2238 
2239 	if (nic->rxd_mode == RXD_MODE_3B) {
2240 		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2241 		val64 = readq(&bar0->rx_pa_cfg);
2242 		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2243 		writeq(val64, &bar0->rx_pa_cfg);
2244 	}
2245 
2246 	if (vlan_tag_strip == 0) {
2247 		val64 = readq(&bar0->rx_pa_cfg);
2248 		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2249 		writeq(val64, &bar0->rx_pa_cfg);
2250 		nic->vlan_strip_flag = 0;
2251 	}
2252 
2253 	/*
2254 	 * Enabling MC-RLDRAM. After enabling the device, we timeout
2255 	 * for around 100ms, which is approximately the time required
2256 	 * for the device to be ready for operation.
2257 	 */
2258 	val64 = readq(&bar0->mc_rldram_mrs);
2259 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2260 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2261 	val64 = readq(&bar0->mc_rldram_mrs);
2262 
2263 	msleep(100);	/* Delay by around 100 ms. */
2264 
2265 	/* Enabling ECC Protection. */
2266 	val64 = readq(&bar0->adapter_control);
2267 	val64 &= ~ADAPTER_ECC_EN;
2268 	writeq(val64, &bar0->adapter_control);
2269 
2270 	/*
2271 	 * Verify if the device is ready to be enabled, if so enable
2272 	 * it.
2273 	 */
2274 	val64 = readq(&bar0->adapter_status);
2275 	if (!verify_xena_quiescence(nic)) {
2276 		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2277 			  "Adapter status reads: 0x%llx\n",
2278 			  dev->name, (unsigned long long)val64);
2279 		return FAILURE;
2280 	}
2281 
2282 	/*
2283 	 * With some switches, link might be already up at this point.
2284 	 * Because of this weird behavior, when we enable laser,
2285 	 * we may not get link. We need to handle this. We cannot
2286 	 * figure out which switch is misbehaving. So we are forced to
2287 	 * make a global change.
2288 	 */
2289 
2290 	/* Enabling Laser. */
2291 	val64 = readq(&bar0->adapter_control);
2292 	val64 |= ADAPTER_EOI_TX_ON;
2293 	writeq(val64, &bar0->adapter_control);
2294 
2295 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2296 		/*
2297 		 * Dont see link state interrupts initially on some switches,
2298 		 * so directly scheduling the link state task here.
2299 		 */
2300 		schedule_work(&nic->set_link_task);
2301 	}
2302 	/* SXE-002: Initialize link and activity LED */
2303 	subid = nic->pdev->subsystem_device;
2304 	if (((subid & 0xFF) >= 0x07) &&
2305 	    (nic->device_type == XFRAME_I_DEVICE)) {
2306 		val64 = readq(&bar0->gpio_control);
2307 		val64 |= 0x0000800000000000ULL;
2308 		writeq(val64, &bar0->gpio_control);
2309 		val64 = 0x0411040400000000ULL;
2310 		writeq(val64, (void __iomem *)bar0 + 0x2700);
2311 	}
2312 
2313 	return SUCCESS;
2314 }
2315 /**
2316  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2317  */
2318 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2319 					struct TxD *txdlp, int get_off)
2320 {
2321 	struct s2io_nic *nic = fifo_data->nic;
2322 	struct sk_buff *skb;
2323 	struct TxD *txds;
2324 	u16 j, frg_cnt;
2325 
2326 	txds = txdlp;
2327 	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2328 		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2329 				 sizeof(u64), PCI_DMA_TODEVICE);
2330 		txds++;
2331 	}
2332 
2333 	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2334 	if (!skb) {
2335 		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2336 		return NULL;
2337 	}
2338 	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2339 			 skb_headlen(skb), PCI_DMA_TODEVICE);
2340 	frg_cnt = skb_shinfo(skb)->nr_frags;
2341 	if (frg_cnt) {
2342 		txds++;
2343 		for (j = 0; j < frg_cnt; j++, txds++) {
2344 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2345 			if (!txds->Buffer_Pointer)
2346 				break;
2347 			pci_unmap_page(nic->pdev,
2348 				       (dma_addr_t)txds->Buffer_Pointer,
2349 				       skb_frag_size(frag), PCI_DMA_TODEVICE);
2350 		}
2351 	}
2352 	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2353 	return skb;
2354 }
2355 
2356 /**
2357  *  free_tx_buffers - Free all queued Tx buffers
2358  *  @nic : device private variable.
2359  *  Description:
2360  *  Free all queued Tx buffers.
2361  *  Return Value: void
2362  */
2363 
2364 static void free_tx_buffers(struct s2io_nic *nic)
2365 {
2366 	struct net_device *dev = nic->dev;
2367 	struct sk_buff *skb;
2368 	struct TxD *txdp;
2369 	int i, j;
2370 	int cnt = 0;
2371 	struct config_param *config = &nic->config;
2372 	struct mac_info *mac_control = &nic->mac_control;
2373 	struct stat_block *stats = mac_control->stats_info;
2374 	struct swStat *swstats = &stats->sw_stat;
2375 
2376 	for (i = 0; i < config->tx_fifo_num; i++) {
2377 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2378 		struct fifo_info *fifo = &mac_control->fifos[i];
2379 		unsigned long flags;
2380 
2381 		spin_lock_irqsave(&fifo->tx_lock, flags);
2382 		for (j = 0; j < tx_cfg->fifo_len; j++) {
2383 			txdp = fifo->list_info[j].list_virt_addr;
2384 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2385 			if (skb) {
2386 				swstats->mem_freed += skb->truesize;
2387 				dev_kfree_skb(skb);
2388 				cnt++;
2389 			}
2390 		}
2391 		DBG_PRINT(INTR_DBG,
2392 			  "%s: forcibly freeing %d skbs on FIFO%d\n",
2393 			  dev->name, cnt, i);
2394 		fifo->tx_curr_get_info.offset = 0;
2395 		fifo->tx_curr_put_info.offset = 0;
2396 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
2397 	}
2398 }
2399 
2400 /**
2401  *   stop_nic -  To stop the nic
2402  *   @nic ; device private variable.
2403  *   Description:
2404  *   This function does exactly the opposite of what the start_nic()
2405  *   function does. This function is called to stop the device.
2406  *   Return Value:
2407  *   void.
2408  */
2409 
2410 static void stop_nic(struct s2io_nic *nic)
2411 {
2412 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2413 	register u64 val64 = 0;
2414 	u16 interruptible;
2415 
2416 	/*  Disable all interrupts */
2417 	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2418 	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2419 	interruptible |= TX_PIC_INTR;
2420 	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2421 
2422 	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2423 	val64 = readq(&bar0->adapter_control);
2424 	val64 &= ~(ADAPTER_CNTL_EN);
2425 	writeq(val64, &bar0->adapter_control);
2426 }
2427 
2428 /**
2429  *  fill_rx_buffers - Allocates the Rx side skbs
2430  *  @ring_info: per ring structure
2431  *  @from_card_up: If this is true, we will map the buffer to get
2432  *     the dma address for buf0 and buf1 to give it to the card.
2433  *     Else we will sync the already mapped buffer to give it to the card.
2434  *  Description:
2435  *  The function allocates Rx side skbs and puts the physical
2436  *  address of these buffers into the RxD buffer pointers, so that the NIC
2437  *  can DMA the received frame into these locations.
2438  *  The NIC supports 3 receive modes, viz
2439  *  1. single buffer,
2440  *  2. three buffer and
2441  *  3. Five buffer modes.
2442  *  Each mode defines how many fragments the received frame will be split
2443  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2444  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2445  *  is split into 3 fragments. As of now only single buffer mode is
2446  *  supported.
2447  *   Return Value:
2448  *  SUCCESS on success or an appropriate -ve value on failure.
2449  */
2450 static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2451 			   int from_card_up)
2452 {
2453 	struct sk_buff *skb;
2454 	struct RxD_t *rxdp;
2455 	int off, size, block_no, block_no1;
2456 	u32 alloc_tab = 0;
2457 	u32 alloc_cnt;
2458 	u64 tmp;
2459 	struct buffAdd *ba;
2460 	struct RxD_t *first_rxdp = NULL;
2461 	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2462 	struct RxD1 *rxdp1;
2463 	struct RxD3 *rxdp3;
2464 	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2465 
2466 	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2467 
2468 	block_no1 = ring->rx_curr_get_info.block_index;
2469 	while (alloc_tab < alloc_cnt) {
2470 		block_no = ring->rx_curr_put_info.block_index;
2471 
2472 		off = ring->rx_curr_put_info.offset;
2473 
2474 		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2475 
2476 		if ((block_no == block_no1) &&
2477 		    (off == ring->rx_curr_get_info.offset) &&
2478 		    (rxdp->Host_Control)) {
2479 			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2480 				  ring->dev->name);
2481 			goto end;
2482 		}
2483 		if (off && (off == ring->rxd_count)) {
2484 			ring->rx_curr_put_info.block_index++;
2485 			if (ring->rx_curr_put_info.block_index ==
2486 			    ring->block_count)
2487 				ring->rx_curr_put_info.block_index = 0;
2488 			block_no = ring->rx_curr_put_info.block_index;
2489 			off = 0;
2490 			ring->rx_curr_put_info.offset = off;
2491 			rxdp = ring->rx_blocks[block_no].block_virt_addr;
2492 			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2493 				  ring->dev->name, rxdp);
2494 
2495 		}
2496 
2497 		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2498 		    ((ring->rxd_mode == RXD_MODE_3B) &&
2499 		     (rxdp->Control_2 & s2BIT(0)))) {
2500 			ring->rx_curr_put_info.offset = off;
2501 			goto end;
2502 		}
2503 		/* calculate size of skb based on ring mode */
2504 		size = ring->mtu +
2505 			HEADER_ETHERNET_II_802_3_SIZE +
2506 			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2507 		if (ring->rxd_mode == RXD_MODE_1)
2508 			size += NET_IP_ALIGN;
2509 		else
2510 			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2511 
2512 		/* allocate skb */
2513 		skb = netdev_alloc_skb(nic->dev, size);
2514 		if (!skb) {
2515 			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2516 				  ring->dev->name);
2517 			if (first_rxdp) {
2518 				dma_wmb();
2519 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2520 			}
2521 			swstats->mem_alloc_fail_cnt++;
2522 
2523 			return -ENOMEM ;
2524 		}
2525 		swstats->mem_allocated += skb->truesize;
2526 
2527 		if (ring->rxd_mode == RXD_MODE_1) {
2528 			/* 1 buffer mode - normal operation mode */
2529 			rxdp1 = (struct RxD1 *)rxdp;
2530 			memset(rxdp, 0, sizeof(struct RxD1));
2531 			skb_reserve(skb, NET_IP_ALIGN);
2532 			rxdp1->Buffer0_ptr =
2533 				pci_map_single(ring->pdev, skb->data,
2534 					       size - NET_IP_ALIGN,
2535 					       PCI_DMA_FROMDEVICE);
2536 			if (pci_dma_mapping_error(nic->pdev,
2537 						  rxdp1->Buffer0_ptr))
2538 				goto pci_map_failed;
2539 
2540 			rxdp->Control_2 =
2541 				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2542 			rxdp->Host_Control = (unsigned long)skb;
2543 		} else if (ring->rxd_mode == RXD_MODE_3B) {
2544 			/*
2545 			 * 2 buffer mode -
2546 			 * 2 buffer mode provides 128
2547 			 * byte aligned receive buffers.
2548 			 */
2549 
2550 			rxdp3 = (struct RxD3 *)rxdp;
2551 			/* save buffer pointers to avoid frequent dma mapping */
2552 			Buffer0_ptr = rxdp3->Buffer0_ptr;
2553 			Buffer1_ptr = rxdp3->Buffer1_ptr;
2554 			memset(rxdp, 0, sizeof(struct RxD3));
2555 			/* restore the buffer pointers for dma sync*/
2556 			rxdp3->Buffer0_ptr = Buffer0_ptr;
2557 			rxdp3->Buffer1_ptr = Buffer1_ptr;
2558 
2559 			ba = &ring->ba[block_no][off];
2560 			skb_reserve(skb, BUF0_LEN);
2561 			tmp = (u64)(unsigned long)skb->data;
2562 			tmp += ALIGN_SIZE;
2563 			tmp &= ~ALIGN_SIZE;
2564 			skb->data = (void *) (unsigned long)tmp;
2565 			skb_reset_tail_pointer(skb);
2566 
2567 			if (from_card_up) {
2568 				rxdp3->Buffer0_ptr =
2569 					pci_map_single(ring->pdev, ba->ba_0,
2570 						       BUF0_LEN,
2571 						       PCI_DMA_FROMDEVICE);
2572 				if (pci_dma_mapping_error(nic->pdev,
2573 							  rxdp3->Buffer0_ptr))
2574 					goto pci_map_failed;
2575 			} else
2576 				pci_dma_sync_single_for_device(ring->pdev,
2577 							       (dma_addr_t)rxdp3->Buffer0_ptr,
2578 							       BUF0_LEN,
2579 							       PCI_DMA_FROMDEVICE);
2580 
2581 			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2582 			if (ring->rxd_mode == RXD_MODE_3B) {
2583 				/* Two buffer mode */
2584 
2585 				/*
2586 				 * Buffer2 will have L3/L4 header plus
2587 				 * L4 payload
2588 				 */
2589 				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
2590 								    skb->data,
2591 								    ring->mtu + 4,
2592 								    PCI_DMA_FROMDEVICE);
2593 
2594 				if (pci_dma_mapping_error(nic->pdev,
2595 							  rxdp3->Buffer2_ptr))
2596 					goto pci_map_failed;
2597 
2598 				if (from_card_up) {
2599 					rxdp3->Buffer1_ptr =
2600 						pci_map_single(ring->pdev,
2601 							       ba->ba_1,
2602 							       BUF1_LEN,
2603 							       PCI_DMA_FROMDEVICE);
2604 
2605 					if (pci_dma_mapping_error(nic->pdev,
2606 								  rxdp3->Buffer1_ptr)) {
2607 						pci_unmap_single(ring->pdev,
2608 								 (dma_addr_t)(unsigned long)
2609 								 skb->data,
2610 								 ring->mtu + 4,
2611 								 PCI_DMA_FROMDEVICE);
2612 						goto pci_map_failed;
2613 					}
2614 				}
2615 				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2616 				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2617 					(ring->mtu + 4);
2618 			}
2619 			rxdp->Control_2 |= s2BIT(0);
2620 			rxdp->Host_Control = (unsigned long) (skb);
2621 		}
2622 		if (alloc_tab & ((1 << rxsync_frequency) - 1))
2623 			rxdp->Control_1 |= RXD_OWN_XENA;
2624 		off++;
2625 		if (off == (ring->rxd_count + 1))
2626 			off = 0;
2627 		ring->rx_curr_put_info.offset = off;
2628 
2629 		rxdp->Control_2 |= SET_RXD_MARKER;
2630 		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2631 			if (first_rxdp) {
2632 				dma_wmb();
2633 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2634 			}
2635 			first_rxdp = rxdp;
2636 		}
2637 		ring->rx_bufs_left += 1;
2638 		alloc_tab++;
2639 	}
2640 
2641 end:
2642 	/* Transfer ownership of first descriptor to adapter just before
2643 	 * exiting. Before that, use memory barrier so that ownership
2644 	 * and other fields are seen by adapter correctly.
2645 	 */
2646 	if (first_rxdp) {
2647 		dma_wmb();
2648 		first_rxdp->Control_1 |= RXD_OWN_XENA;
2649 	}
2650 
2651 	return SUCCESS;
2652 
2653 pci_map_failed:
2654 	swstats->pci_map_fail_cnt++;
2655 	swstats->mem_freed += skb->truesize;
2656 	dev_kfree_skb_irq(skb);
2657 	return -ENOMEM;
2658 }
2659 
2660 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2661 {
2662 	struct net_device *dev = sp->dev;
2663 	int j;
2664 	struct sk_buff *skb;
2665 	struct RxD_t *rxdp;
2666 	struct RxD1 *rxdp1;
2667 	struct RxD3 *rxdp3;
2668 	struct mac_info *mac_control = &sp->mac_control;
2669 	struct stat_block *stats = mac_control->stats_info;
2670 	struct swStat *swstats = &stats->sw_stat;
2671 
2672 	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2673 		rxdp = mac_control->rings[ring_no].
2674 			rx_blocks[blk].rxds[j].virt_addr;
2675 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2676 		if (!skb)
2677 			continue;
2678 		if (sp->rxd_mode == RXD_MODE_1) {
2679 			rxdp1 = (struct RxD1 *)rxdp;
2680 			pci_unmap_single(sp->pdev,
2681 					 (dma_addr_t)rxdp1->Buffer0_ptr,
2682 					 dev->mtu +
2683 					 HEADER_ETHERNET_II_802_3_SIZE +
2684 					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2685 					 PCI_DMA_FROMDEVICE);
2686 			memset(rxdp, 0, sizeof(struct RxD1));
2687 		} else if (sp->rxd_mode == RXD_MODE_3B) {
2688 			rxdp3 = (struct RxD3 *)rxdp;
2689 			pci_unmap_single(sp->pdev,
2690 					 (dma_addr_t)rxdp3->Buffer0_ptr,
2691 					 BUF0_LEN,
2692 					 PCI_DMA_FROMDEVICE);
2693 			pci_unmap_single(sp->pdev,
2694 					 (dma_addr_t)rxdp3->Buffer1_ptr,
2695 					 BUF1_LEN,
2696 					 PCI_DMA_FROMDEVICE);
2697 			pci_unmap_single(sp->pdev,
2698 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2699 					 dev->mtu + 4,
2700 					 PCI_DMA_FROMDEVICE);
2701 			memset(rxdp, 0, sizeof(struct RxD3));
2702 		}
2703 		swstats->mem_freed += skb->truesize;
2704 		dev_kfree_skb(skb);
2705 		mac_control->rings[ring_no].rx_bufs_left -= 1;
2706 	}
2707 }
2708 
2709 /**
2710  *  free_rx_buffers - Frees all Rx buffers
2711  *  @sp: device private variable.
2712  *  Description:
2713  *  This function will free all Rx buffers allocated by host.
2714  *  Return Value:
2715  *  NONE.
2716  */
2717 
2718 static void free_rx_buffers(struct s2io_nic *sp)
2719 {
2720 	struct net_device *dev = sp->dev;
2721 	int i, blk = 0, buf_cnt = 0;
2722 	struct config_param *config = &sp->config;
2723 	struct mac_info *mac_control = &sp->mac_control;
2724 
2725 	for (i = 0; i < config->rx_ring_num; i++) {
2726 		struct ring_info *ring = &mac_control->rings[i];
2727 
2728 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2729 			free_rxd_blk(sp, i, blk);
2730 
2731 		ring->rx_curr_put_info.block_index = 0;
2732 		ring->rx_curr_get_info.block_index = 0;
2733 		ring->rx_curr_put_info.offset = 0;
2734 		ring->rx_curr_get_info.offset = 0;
2735 		ring->rx_bufs_left = 0;
2736 		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2737 			  dev->name, buf_cnt, i);
2738 	}
2739 }
2740 
2741 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2742 {
2743 	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2744 		DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2745 			  ring->dev->name);
2746 	}
2747 	return 0;
2748 }
2749 
2750 /**
2751  * s2io_poll - Rx interrupt handler for NAPI support
2752  * @napi : pointer to the napi structure.
2753  * @budget : The number of packets that were budgeted to be processed
2754  * during  one pass through the 'Poll" function.
2755  * Description:
2756  * Comes into picture only if NAPI support has been incorporated. It does
2757  * the same thing that rx_intr_handler does, but not in a interrupt context
2758  * also It will process only a given number of packets.
2759  * Return value:
2760  * 0 on success and 1 if there are No Rx packets to be processed.
2761  */
2762 
2763 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2764 {
2765 	struct ring_info *ring = container_of(napi, struct ring_info, napi);
2766 	struct net_device *dev = ring->dev;
2767 	int pkts_processed = 0;
2768 	u8 __iomem *addr = NULL;
2769 	u8 val8 = 0;
2770 	struct s2io_nic *nic = netdev_priv(dev);
2771 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2772 	int budget_org = budget;
2773 
2774 	if (unlikely(!is_s2io_card_up(nic)))
2775 		return 0;
2776 
2777 	pkts_processed = rx_intr_handler(ring, budget);
2778 	s2io_chk_rx_buffers(nic, ring);
2779 
2780 	if (pkts_processed < budget_org) {
2781 		napi_complete_done(napi, pkts_processed);
2782 		/*Re Enable MSI-Rx Vector*/
2783 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2784 		addr += 7 - ring->ring_no;
2785 		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2786 		writeb(val8, addr);
2787 		val8 = readb(addr);
2788 	}
2789 	return pkts_processed;
2790 }
2791 
2792 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2793 {
2794 	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2795 	int pkts_processed = 0;
2796 	int ring_pkts_processed, i;
2797 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2798 	int budget_org = budget;
2799 	struct config_param *config = &nic->config;
2800 	struct mac_info *mac_control = &nic->mac_control;
2801 
2802 	if (unlikely(!is_s2io_card_up(nic)))
2803 		return 0;
2804 
2805 	for (i = 0; i < config->rx_ring_num; i++) {
2806 		struct ring_info *ring = &mac_control->rings[i];
2807 		ring_pkts_processed = rx_intr_handler(ring, budget);
2808 		s2io_chk_rx_buffers(nic, ring);
2809 		pkts_processed += ring_pkts_processed;
2810 		budget -= ring_pkts_processed;
2811 		if (budget <= 0)
2812 			break;
2813 	}
2814 	if (pkts_processed < budget_org) {
2815 		napi_complete_done(napi, pkts_processed);
2816 		/* Re enable the Rx interrupts for the ring */
2817 		writeq(0, &bar0->rx_traffic_mask);
2818 		readl(&bar0->rx_traffic_mask);
2819 	}
2820 	return pkts_processed;
2821 }
2822 
2823 #ifdef CONFIG_NET_POLL_CONTROLLER
2824 /**
2825  * s2io_netpoll - netpoll event handler entry point
2826  * @dev : pointer to the device structure.
2827  * Description:
2828  * 	This function will be called by upper layer to check for events on the
2829  * interface in situations where interrupts are disabled. It is used for
2830  * specific in-kernel networking tasks, such as remote consoles and kernel
2831  * debugging over the network (example netdump in RedHat).
2832  */
2833 static void s2io_netpoll(struct net_device *dev)
2834 {
2835 	struct s2io_nic *nic = netdev_priv(dev);
2836 	const int irq = nic->pdev->irq;
2837 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2838 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2839 	int i;
2840 	struct config_param *config = &nic->config;
2841 	struct mac_info *mac_control = &nic->mac_control;
2842 
2843 	if (pci_channel_offline(nic->pdev))
2844 		return;
2845 
2846 	disable_irq(irq);
2847 
2848 	writeq(val64, &bar0->rx_traffic_int);
2849 	writeq(val64, &bar0->tx_traffic_int);
2850 
2851 	/* we need to free up the transmitted skbufs or else netpoll will
2852 	 * run out of skbs and will fail and eventually netpoll application such
2853 	 * as netdump will fail.
2854 	 */
2855 	for (i = 0; i < config->tx_fifo_num; i++)
2856 		tx_intr_handler(&mac_control->fifos[i]);
2857 
2858 	/* check for received packet and indicate up to network */
2859 	for (i = 0; i < config->rx_ring_num; i++) {
2860 		struct ring_info *ring = &mac_control->rings[i];
2861 
2862 		rx_intr_handler(ring, 0);
2863 	}
2864 
2865 	for (i = 0; i < config->rx_ring_num; i++) {
2866 		struct ring_info *ring = &mac_control->rings[i];
2867 
2868 		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2869 			DBG_PRINT(INFO_DBG,
2870 				  "%s: Out of memory in Rx Netpoll!!\n",
2871 				  dev->name);
2872 			break;
2873 		}
2874 	}
2875 	enable_irq(irq);
2876 }
2877 #endif
2878 
2879 /**
2880  *  rx_intr_handler - Rx interrupt handler
2881  *  @ring_info: per ring structure.
2882  *  @budget: budget for napi processing.
2883  *  Description:
2884  *  If the interrupt is because of a received frame or if the
2885  *  receive ring contains fresh as yet un-processed frames,this function is
2886  *  called. It picks out the RxD at which place the last Rx processing had
2887  *  stopped and sends the skb to the OSM's Rx handler and then increments
2888  *  the offset.
2889  *  Return Value:
2890  *  No. of napi packets processed.
2891  */
2892 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2893 {
2894 	int get_block, put_block;
2895 	struct rx_curr_get_info get_info, put_info;
2896 	struct RxD_t *rxdp;
2897 	struct sk_buff *skb;
2898 	int pkt_cnt = 0, napi_pkts = 0;
2899 	int i;
2900 	struct RxD1 *rxdp1;
2901 	struct RxD3 *rxdp3;
2902 
2903 	if (budget <= 0)
2904 		return napi_pkts;
2905 
2906 	get_info = ring_data->rx_curr_get_info;
2907 	get_block = get_info.block_index;
2908 	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2909 	put_block = put_info.block_index;
2910 	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2911 
2912 	while (RXD_IS_UP2DT(rxdp)) {
2913 		/*
2914 		 * If your are next to put index then it's
2915 		 * FIFO full condition
2916 		 */
2917 		if ((get_block == put_block) &&
2918 		    (get_info.offset + 1) == put_info.offset) {
2919 			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2920 				  ring_data->dev->name);
2921 			break;
2922 		}
2923 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2924 		if (skb == NULL) {
2925 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2926 				  ring_data->dev->name);
2927 			return 0;
2928 		}
2929 		if (ring_data->rxd_mode == RXD_MODE_1) {
2930 			rxdp1 = (struct RxD1 *)rxdp;
2931 			pci_unmap_single(ring_data->pdev, (dma_addr_t)
2932 					 rxdp1->Buffer0_ptr,
2933 					 ring_data->mtu +
2934 					 HEADER_ETHERNET_II_802_3_SIZE +
2935 					 HEADER_802_2_SIZE +
2936 					 HEADER_SNAP_SIZE,
2937 					 PCI_DMA_FROMDEVICE);
2938 		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
2939 			rxdp3 = (struct RxD3 *)rxdp;
2940 			pci_dma_sync_single_for_cpu(ring_data->pdev,
2941 						    (dma_addr_t)rxdp3->Buffer0_ptr,
2942 						    BUF0_LEN,
2943 						    PCI_DMA_FROMDEVICE);
2944 			pci_unmap_single(ring_data->pdev,
2945 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2946 					 ring_data->mtu + 4,
2947 					 PCI_DMA_FROMDEVICE);
2948 		}
2949 		prefetch(skb->data);
2950 		rx_osm_handler(ring_data, rxdp);
2951 		get_info.offset++;
2952 		ring_data->rx_curr_get_info.offset = get_info.offset;
2953 		rxdp = ring_data->rx_blocks[get_block].
2954 			rxds[get_info.offset].virt_addr;
2955 		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2956 			get_info.offset = 0;
2957 			ring_data->rx_curr_get_info.offset = get_info.offset;
2958 			get_block++;
2959 			if (get_block == ring_data->block_count)
2960 				get_block = 0;
2961 			ring_data->rx_curr_get_info.block_index = get_block;
2962 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2963 		}
2964 
2965 		if (ring_data->nic->config.napi) {
2966 			budget--;
2967 			napi_pkts++;
2968 			if (!budget)
2969 				break;
2970 		}
2971 		pkt_cnt++;
2972 		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2973 			break;
2974 	}
2975 	if (ring_data->lro) {
2976 		/* Clear all LRO sessions before exiting */
2977 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2978 			struct lro *lro = &ring_data->lro0_n[i];
2979 			if (lro->in_use) {
2980 				update_L3L4_header(ring_data->nic, lro);
2981 				queue_rx_frame(lro->parent, lro->vlan_tag);
2982 				clear_lro_session(lro);
2983 			}
2984 		}
2985 	}
2986 	return napi_pkts;
2987 }
2988 
2989 /**
2990  *  tx_intr_handler - Transmit interrupt handler
2991  *  @nic : device private variable
2992  *  Description:
2993  *  If an interrupt was raised to indicate DMA complete of the
2994  *  Tx packet, this function is called. It identifies the last TxD
2995  *  whose buffer was freed and frees all skbs whose data have already
2996  *  DMA'ed into the NICs internal memory.
2997  *  Return Value:
2998  *  NONE
2999  */
3000 
3001 static void tx_intr_handler(struct fifo_info *fifo_data)
3002 {
3003 	struct s2io_nic *nic = fifo_data->nic;
3004 	struct tx_curr_get_info get_info, put_info;
3005 	struct sk_buff *skb = NULL;
3006 	struct TxD *txdlp;
3007 	int pkt_cnt = 0;
3008 	unsigned long flags = 0;
3009 	u8 err_mask;
3010 	struct stat_block *stats = nic->mac_control.stats_info;
3011 	struct swStat *swstats = &stats->sw_stat;
3012 
3013 	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3014 		return;
3015 
3016 	get_info = fifo_data->tx_curr_get_info;
3017 	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3018 	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3019 	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3020 	       (get_info.offset != put_info.offset) &&
3021 	       (txdlp->Host_Control)) {
3022 		/* Check for TxD errors */
3023 		if (txdlp->Control_1 & TXD_T_CODE) {
3024 			unsigned long long err;
3025 			err = txdlp->Control_1 & TXD_T_CODE;
3026 			if (err & 0x1) {
3027 				swstats->parity_err_cnt++;
3028 			}
3029 
3030 			/* update t_code statistics */
3031 			err_mask = err >> 48;
3032 			switch (err_mask) {
3033 			case 2:
3034 				swstats->tx_buf_abort_cnt++;
3035 				break;
3036 
3037 			case 3:
3038 				swstats->tx_desc_abort_cnt++;
3039 				break;
3040 
3041 			case 7:
3042 				swstats->tx_parity_err_cnt++;
3043 				break;
3044 
3045 			case 10:
3046 				swstats->tx_link_loss_cnt++;
3047 				break;
3048 
3049 			case 15:
3050 				swstats->tx_list_proc_err_cnt++;
3051 				break;
3052 			}
3053 		}
3054 
3055 		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3056 		if (skb == NULL) {
3057 			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3058 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3059 				  __func__);
3060 			return;
3061 		}
3062 		pkt_cnt++;
3063 
3064 		/* Updating the statistics block */
3065 		swstats->mem_freed += skb->truesize;
3066 		dev_kfree_skb_irq(skb);
3067 
3068 		get_info.offset++;
3069 		if (get_info.offset == get_info.fifo_len + 1)
3070 			get_info.offset = 0;
3071 		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3072 		fifo_data->tx_curr_get_info.offset = get_info.offset;
3073 	}
3074 
3075 	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3076 
3077 	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3078 }
3079 
3080 /**
3081  *  s2io_mdio_write - Function to write in to MDIO registers
3082  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3083  *  @addr     : address value
3084  *  @value    : data value
3085  *  @dev      : pointer to net_device structure
3086  *  Description:
3087  *  This function is used to write values to the MDIO registers
3088  *  NONE
3089  */
3090 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3091 			    struct net_device *dev)
3092 {
3093 	u64 val64;
3094 	struct s2io_nic *sp = netdev_priv(dev);
3095 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3096 
3097 	/* address transaction */
3098 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3099 		MDIO_MMD_DEV_ADDR(mmd_type) |
3100 		MDIO_MMS_PRT_ADDR(0x0);
3101 	writeq(val64, &bar0->mdio_control);
3102 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3103 	writeq(val64, &bar0->mdio_control);
3104 	udelay(100);
3105 
3106 	/* Data transaction */
3107 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3108 		MDIO_MMD_DEV_ADDR(mmd_type) |
3109 		MDIO_MMS_PRT_ADDR(0x0) |
3110 		MDIO_MDIO_DATA(value) |
3111 		MDIO_OP(MDIO_OP_WRITE_TRANS);
3112 	writeq(val64, &bar0->mdio_control);
3113 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3114 	writeq(val64, &bar0->mdio_control);
3115 	udelay(100);
3116 
3117 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3118 		MDIO_MMD_DEV_ADDR(mmd_type) |
3119 		MDIO_MMS_PRT_ADDR(0x0) |
3120 		MDIO_OP(MDIO_OP_READ_TRANS);
3121 	writeq(val64, &bar0->mdio_control);
3122 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3123 	writeq(val64, &bar0->mdio_control);
3124 	udelay(100);
3125 }
3126 
3127 /**
3128  *  s2io_mdio_read - Function to write in to MDIO registers
3129  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3130  *  @addr     : address value
3131  *  @dev      : pointer to net_device structure
3132  *  Description:
3133  *  This function is used to read values to the MDIO registers
3134  *  NONE
3135  */
3136 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3137 {
3138 	u64 val64 = 0x0;
3139 	u64 rval64 = 0x0;
3140 	struct s2io_nic *sp = netdev_priv(dev);
3141 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3142 
3143 	/* address transaction */
3144 	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3145 			 | MDIO_MMD_DEV_ADDR(mmd_type)
3146 			 | MDIO_MMS_PRT_ADDR(0x0));
3147 	writeq(val64, &bar0->mdio_control);
3148 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3149 	writeq(val64, &bar0->mdio_control);
3150 	udelay(100);
3151 
3152 	/* Data transaction */
3153 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3154 		MDIO_MMD_DEV_ADDR(mmd_type) |
3155 		MDIO_MMS_PRT_ADDR(0x0) |
3156 		MDIO_OP(MDIO_OP_READ_TRANS);
3157 	writeq(val64, &bar0->mdio_control);
3158 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3159 	writeq(val64, &bar0->mdio_control);
3160 	udelay(100);
3161 
3162 	/* Read the value from regs */
3163 	rval64 = readq(&bar0->mdio_control);
3164 	rval64 = rval64 & 0xFFFF0000;
3165 	rval64 = rval64 >> 16;
3166 	return rval64;
3167 }
3168 
3169 /**
3170  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3171  *  @counter      : counter value to be updated
3172  *  @flag         : flag to indicate the status
3173  *  @type         : counter type
3174  *  Description:
3175  *  This function is to check the status of the xpak counters value
3176  *  NONE
3177  */
3178 
3179 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3180 				  u16 flag, u16 type)
3181 {
3182 	u64 mask = 0x3;
3183 	u64 val64;
3184 	int i;
3185 	for (i = 0; i < index; i++)
3186 		mask = mask << 0x2;
3187 
3188 	if (flag > 0) {
3189 		*counter = *counter + 1;
3190 		val64 = *regs_stat & mask;
3191 		val64 = val64 >> (index * 0x2);
3192 		val64 = val64 + 1;
3193 		if (val64 == 3) {
3194 			switch (type) {
3195 			case 1:
3196 				DBG_PRINT(ERR_DBG,
3197 					  "Take Xframe NIC out of service.\n");
3198 				DBG_PRINT(ERR_DBG,
3199 "Excessive temperatures may result in premature transceiver failure.\n");
3200 				break;
3201 			case 2:
3202 				DBG_PRINT(ERR_DBG,
3203 					  "Take Xframe NIC out of service.\n");
3204 				DBG_PRINT(ERR_DBG,
3205 "Excessive bias currents may indicate imminent laser diode failure.\n");
3206 				break;
3207 			case 3:
3208 				DBG_PRINT(ERR_DBG,
3209 					  "Take Xframe NIC out of service.\n");
3210 				DBG_PRINT(ERR_DBG,
3211 "Excessive laser output power may saturate far-end receiver.\n");
3212 				break;
3213 			default:
3214 				DBG_PRINT(ERR_DBG,
3215 					  "Incorrect XPAK Alarm type\n");
3216 			}
3217 			val64 = 0x0;
3218 		}
3219 		val64 = val64 << (index * 0x2);
3220 		*regs_stat = (*regs_stat & (~mask)) | (val64);
3221 
3222 	} else {
3223 		*regs_stat = *regs_stat & (~mask);
3224 	}
3225 }
3226 
3227 /**
3228  *  s2io_updt_xpak_counter - Function to update the xpak counters
3229  *  @dev         : pointer to net_device struct
3230  *  Description:
3231  *  This function is to upate the status of the xpak counters value
3232  *  NONE
3233  */
3234 static void s2io_updt_xpak_counter(struct net_device *dev)
3235 {
3236 	u16 flag  = 0x0;
3237 	u16 type  = 0x0;
3238 	u16 val16 = 0x0;
3239 	u64 val64 = 0x0;
3240 	u64 addr  = 0x0;
3241 
3242 	struct s2io_nic *sp = netdev_priv(dev);
3243 	struct stat_block *stats = sp->mac_control.stats_info;
3244 	struct xpakStat *xstats = &stats->xpak_stat;
3245 
3246 	/* Check the communication with the MDIO slave */
3247 	addr = MDIO_CTRL1;
3248 	val64 = 0x0;
3249 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3250 	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3251 		DBG_PRINT(ERR_DBG,
3252 			  "ERR: MDIO slave access failed - Returned %llx\n",
3253 			  (unsigned long long)val64);
3254 		return;
3255 	}
3256 
3257 	/* Check for the expected value of control reg 1 */
3258 	if (val64 != MDIO_CTRL1_SPEED10G) {
3259 		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3260 			  "Returned: %llx- Expected: 0x%x\n",
3261 			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3262 		return;
3263 	}
3264 
3265 	/* Loading the DOM register to MDIO register */
3266 	addr = 0xA100;
3267 	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3268 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3269 
3270 	/* Reading the Alarm flags */
3271 	addr = 0xA070;
3272 	val64 = 0x0;
3273 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3274 
3275 	flag = CHECKBIT(val64, 0x7);
3276 	type = 1;
3277 	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3278 			      &xstats->xpak_regs_stat,
3279 			      0x0, flag, type);
3280 
3281 	if (CHECKBIT(val64, 0x6))
3282 		xstats->alarm_transceiver_temp_low++;
3283 
3284 	flag = CHECKBIT(val64, 0x3);
3285 	type = 2;
3286 	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3287 			      &xstats->xpak_regs_stat,
3288 			      0x2, flag, type);
3289 
3290 	if (CHECKBIT(val64, 0x2))
3291 		xstats->alarm_laser_bias_current_low++;
3292 
3293 	flag = CHECKBIT(val64, 0x1);
3294 	type = 3;
3295 	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3296 			      &xstats->xpak_regs_stat,
3297 			      0x4, flag, type);
3298 
3299 	if (CHECKBIT(val64, 0x0))
3300 		xstats->alarm_laser_output_power_low++;
3301 
3302 	/* Reading the Warning flags */
3303 	addr = 0xA074;
3304 	val64 = 0x0;
3305 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3306 
3307 	if (CHECKBIT(val64, 0x7))
3308 		xstats->warn_transceiver_temp_high++;
3309 
3310 	if (CHECKBIT(val64, 0x6))
3311 		xstats->warn_transceiver_temp_low++;
3312 
3313 	if (CHECKBIT(val64, 0x3))
3314 		xstats->warn_laser_bias_current_high++;
3315 
3316 	if (CHECKBIT(val64, 0x2))
3317 		xstats->warn_laser_bias_current_low++;
3318 
3319 	if (CHECKBIT(val64, 0x1))
3320 		xstats->warn_laser_output_power_high++;
3321 
3322 	if (CHECKBIT(val64, 0x0))
3323 		xstats->warn_laser_output_power_low++;
3324 }
3325 
3326 /**
3327  *  wait_for_cmd_complete - waits for a command to complete.
3328  *  @sp : private member of the device structure, which is a pointer to the
3329  *  s2io_nic structure.
3330  *  Description: Function that waits for a command to Write into RMAC
3331  *  ADDR DATA registers to be completed and returns either success or
3332  *  error depending on whether the command was complete or not.
3333  *  Return value:
3334  *   SUCCESS on success and FAILURE on failure.
3335  */
3336 
3337 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3338 				 int bit_state)
3339 {
3340 	int ret = FAILURE, cnt = 0, delay = 1;
3341 	u64 val64;
3342 
3343 	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3344 		return FAILURE;
3345 
3346 	do {
3347 		val64 = readq(addr);
3348 		if (bit_state == S2IO_BIT_RESET) {
3349 			if (!(val64 & busy_bit)) {
3350 				ret = SUCCESS;
3351 				break;
3352 			}
3353 		} else {
3354 			if (val64 & busy_bit) {
3355 				ret = SUCCESS;
3356 				break;
3357 			}
3358 		}
3359 
3360 		if (in_interrupt())
3361 			mdelay(delay);
3362 		else
3363 			msleep(delay);
3364 
3365 		if (++cnt >= 10)
3366 			delay = 50;
3367 	} while (cnt < 20);
3368 	return ret;
3369 }
3370 /**
3371  * check_pci_device_id - Checks if the device id is supported
3372  * @id : device id
3373  * Description: Function to check if the pci device id is supported by driver.
3374  * Return value: Actual device id if supported else PCI_ANY_ID
3375  */
3376 static u16 check_pci_device_id(u16 id)
3377 {
3378 	switch (id) {
3379 	case PCI_DEVICE_ID_HERC_WIN:
3380 	case PCI_DEVICE_ID_HERC_UNI:
3381 		return XFRAME_II_DEVICE;
3382 	case PCI_DEVICE_ID_S2IO_UNI:
3383 	case PCI_DEVICE_ID_S2IO_WIN:
3384 		return XFRAME_I_DEVICE;
3385 	default:
3386 		return PCI_ANY_ID;
3387 	}
3388 }
3389 
3390 /**
3391  *  s2io_reset - Resets the card.
3392  *  @sp : private member of the device structure.
3393  *  Description: Function to Reset the card. This function then also
3394  *  restores the previously saved PCI configuration space registers as
3395  *  the card reset also resets the configuration space.
3396  *  Return value:
3397  *  void.
3398  */
3399 
3400 static void s2io_reset(struct s2io_nic *sp)
3401 {
3402 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3403 	u64 val64;
3404 	u16 subid, pci_cmd;
3405 	int i;
3406 	u16 val16;
3407 	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3408 	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3409 	struct stat_block *stats;
3410 	struct swStat *swstats;
3411 
3412 	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3413 		  __func__, pci_name(sp->pdev));
3414 
3415 	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3416 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3417 
3418 	val64 = SW_RESET_ALL;
3419 	writeq(val64, &bar0->sw_reset);
3420 	if (strstr(sp->product_name, "CX4"))
3421 		msleep(750);
3422 	msleep(250);
3423 	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3424 
3425 		/* Restore the PCI state saved during initialization. */
3426 		pci_restore_state(sp->pdev);
3427 		pci_save_state(sp->pdev);
3428 		pci_read_config_word(sp->pdev, 0x2, &val16);
3429 		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3430 			break;
3431 		msleep(200);
3432 	}
3433 
3434 	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3435 		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3436 
3437 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3438 
3439 	s2io_init_pci(sp);
3440 
3441 	/* Set swapper to enable I/O register access */
3442 	s2io_set_swapper(sp);
3443 
3444 	/* restore mac_addr entries */
3445 	do_s2io_restore_unicast_mc(sp);
3446 
3447 	/* Restore the MSIX table entries from local variables */
3448 	restore_xmsi_data(sp);
3449 
3450 	/* Clear certain PCI/PCI-X fields after reset */
3451 	if (sp->device_type == XFRAME_II_DEVICE) {
3452 		/* Clear "detected parity error" bit */
3453 		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3454 
3455 		/* Clearing PCIX Ecc status register */
3456 		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3457 
3458 		/* Clearing PCI_STATUS error reflected here */
3459 		writeq(s2BIT(62), &bar0->txpic_int_reg);
3460 	}
3461 
3462 	/* Reset device statistics maintained by OS */
3463 	memset(&sp->stats, 0, sizeof(struct net_device_stats));
3464 
3465 	stats = sp->mac_control.stats_info;
3466 	swstats = &stats->sw_stat;
3467 
3468 	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3469 	up_cnt = swstats->link_up_cnt;
3470 	down_cnt = swstats->link_down_cnt;
3471 	up_time = swstats->link_up_time;
3472 	down_time = swstats->link_down_time;
3473 	reset_cnt = swstats->soft_reset_cnt;
3474 	mem_alloc_cnt = swstats->mem_allocated;
3475 	mem_free_cnt = swstats->mem_freed;
3476 	watchdog_cnt = swstats->watchdog_timer_cnt;
3477 
3478 	memset(stats, 0, sizeof(struct stat_block));
3479 
3480 	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3481 	swstats->link_up_cnt = up_cnt;
3482 	swstats->link_down_cnt = down_cnt;
3483 	swstats->link_up_time = up_time;
3484 	swstats->link_down_time = down_time;
3485 	swstats->soft_reset_cnt = reset_cnt;
3486 	swstats->mem_allocated = mem_alloc_cnt;
3487 	swstats->mem_freed = mem_free_cnt;
3488 	swstats->watchdog_timer_cnt = watchdog_cnt;
3489 
3490 	/* SXE-002: Configure link and activity LED to turn it off */
3491 	subid = sp->pdev->subsystem_device;
3492 	if (((subid & 0xFF) >= 0x07) &&
3493 	    (sp->device_type == XFRAME_I_DEVICE)) {
3494 		val64 = readq(&bar0->gpio_control);
3495 		val64 |= 0x0000800000000000ULL;
3496 		writeq(val64, &bar0->gpio_control);
3497 		val64 = 0x0411040400000000ULL;
3498 		writeq(val64, (void __iomem *)bar0 + 0x2700);
3499 	}
3500 
3501 	/*
3502 	 * Clear spurious ECC interrupts that would have occurred on
3503 	 * XFRAME II cards after reset.
3504 	 */
3505 	if (sp->device_type == XFRAME_II_DEVICE) {
3506 		val64 = readq(&bar0->pcc_err_reg);
3507 		writeq(val64, &bar0->pcc_err_reg);
3508 	}
3509 
3510 	sp->device_enabled_once = false;
3511 }
3512 
3513 /**
3514  *  s2io_set_swapper - to set the swapper controle on the card
3515  *  @sp : private member of the device structure,
3516  *  pointer to the s2io_nic structure.
3517  *  Description: Function to set the swapper control on the card
3518  *  correctly depending on the 'endianness' of the system.
3519  *  Return value:
3520  *  SUCCESS on success and FAILURE on failure.
3521  */
3522 
3523 static int s2io_set_swapper(struct s2io_nic *sp)
3524 {
3525 	struct net_device *dev = sp->dev;
3526 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3527 	u64 val64, valt, valr;
3528 
3529 	/*
3530 	 * Set proper endian settings and verify the same by reading
3531 	 * the PIF Feed-back register.
3532 	 */
3533 
3534 	val64 = readq(&bar0->pif_rd_swapper_fb);
3535 	if (val64 != 0x0123456789ABCDEFULL) {
3536 		int i = 0;
3537 		static const u64 value[] = {
3538 			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
3539 			0x8100008181000081ULL,	/* FE=1, SE=0 */
3540 			0x4200004242000042ULL,	/* FE=0, SE=1 */
3541 			0			/* FE=0, SE=0 */
3542 		};
3543 
3544 		while (i < 4) {
3545 			writeq(value[i], &bar0->swapper_ctrl);
3546 			val64 = readq(&bar0->pif_rd_swapper_fb);
3547 			if (val64 == 0x0123456789ABCDEFULL)
3548 				break;
3549 			i++;
3550 		}
3551 		if (i == 4) {
3552 			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3553 				  "feedback read %llx\n",
3554 				  dev->name, (unsigned long long)val64);
3555 			return FAILURE;
3556 		}
3557 		valr = value[i];
3558 	} else {
3559 		valr = readq(&bar0->swapper_ctrl);
3560 	}
3561 
3562 	valt = 0x0123456789ABCDEFULL;
3563 	writeq(valt, &bar0->xmsi_address);
3564 	val64 = readq(&bar0->xmsi_address);
3565 
3566 	if (val64 != valt) {
3567 		int i = 0;
3568 		static const u64 value[] = {
3569 			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
3570 			0x0081810000818100ULL,	/* FE=1, SE=0 */
3571 			0x0042420000424200ULL,	/* FE=0, SE=1 */
3572 			0			/* FE=0, SE=0 */
3573 		};
3574 
3575 		while (i < 4) {
3576 			writeq((value[i] | valr), &bar0->swapper_ctrl);
3577 			writeq(valt, &bar0->xmsi_address);
3578 			val64 = readq(&bar0->xmsi_address);
3579 			if (val64 == valt)
3580 				break;
3581 			i++;
3582 		}
3583 		if (i == 4) {
3584 			unsigned long long x = val64;
3585 			DBG_PRINT(ERR_DBG,
3586 				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
3587 			return FAILURE;
3588 		}
3589 	}
3590 	val64 = readq(&bar0->swapper_ctrl);
3591 	val64 &= 0xFFFF000000000000ULL;
3592 
3593 #ifdef __BIG_ENDIAN
3594 	/*
3595 	 * The device by default set to a big endian format, so a
3596 	 * big endian driver need not set anything.
3597 	 */
3598 	val64 |= (SWAPPER_CTRL_TXP_FE |
3599 		  SWAPPER_CTRL_TXP_SE |
3600 		  SWAPPER_CTRL_TXD_R_FE |
3601 		  SWAPPER_CTRL_TXD_W_FE |
3602 		  SWAPPER_CTRL_TXF_R_FE |
3603 		  SWAPPER_CTRL_RXD_R_FE |
3604 		  SWAPPER_CTRL_RXD_W_FE |
3605 		  SWAPPER_CTRL_RXF_W_FE |
3606 		  SWAPPER_CTRL_XMSI_FE |
3607 		  SWAPPER_CTRL_STATS_FE |
3608 		  SWAPPER_CTRL_STATS_SE);
3609 	if (sp->config.intr_type == INTA)
3610 		val64 |= SWAPPER_CTRL_XMSI_SE;
3611 	writeq(val64, &bar0->swapper_ctrl);
3612 #else
3613 	/*
3614 	 * Initially we enable all bits to make it accessible by the
3615 	 * driver, then we selectively enable only those bits that
3616 	 * we want to set.
3617 	 */
3618 	val64 |= (SWAPPER_CTRL_TXP_FE |
3619 		  SWAPPER_CTRL_TXP_SE |
3620 		  SWAPPER_CTRL_TXD_R_FE |
3621 		  SWAPPER_CTRL_TXD_R_SE |
3622 		  SWAPPER_CTRL_TXD_W_FE |
3623 		  SWAPPER_CTRL_TXD_W_SE |
3624 		  SWAPPER_CTRL_TXF_R_FE |
3625 		  SWAPPER_CTRL_RXD_R_FE |
3626 		  SWAPPER_CTRL_RXD_R_SE |
3627 		  SWAPPER_CTRL_RXD_W_FE |
3628 		  SWAPPER_CTRL_RXD_W_SE |
3629 		  SWAPPER_CTRL_RXF_W_FE |
3630 		  SWAPPER_CTRL_XMSI_FE |
3631 		  SWAPPER_CTRL_STATS_FE |
3632 		  SWAPPER_CTRL_STATS_SE);
3633 	if (sp->config.intr_type == INTA)
3634 		val64 |= SWAPPER_CTRL_XMSI_SE;
3635 	writeq(val64, &bar0->swapper_ctrl);
3636 #endif
3637 	val64 = readq(&bar0->swapper_ctrl);
3638 
3639 	/*
3640 	 * Verifying if endian settings are accurate by reading a
3641 	 * feedback register.
3642 	 */
3643 	val64 = readq(&bar0->pif_rd_swapper_fb);
3644 	if (val64 != 0x0123456789ABCDEFULL) {
3645 		/* Endian settings are incorrect, calls for another dekko. */
3646 		DBG_PRINT(ERR_DBG,
3647 			  "%s: Endian settings are wrong, feedback read %llx\n",
3648 			  dev->name, (unsigned long long)val64);
3649 		return FAILURE;
3650 	}
3651 
3652 	return SUCCESS;
3653 }
3654 
3655 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3656 {
3657 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3658 	u64 val64;
3659 	int ret = 0, cnt = 0;
3660 
3661 	do {
3662 		val64 = readq(&bar0->xmsi_access);
3663 		if (!(val64 & s2BIT(15)))
3664 			break;
3665 		mdelay(1);
3666 		cnt++;
3667 	} while (cnt < 5);
3668 	if (cnt == 5) {
3669 		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3670 		ret = 1;
3671 	}
3672 
3673 	return ret;
3674 }
3675 
3676 static void restore_xmsi_data(struct s2io_nic *nic)
3677 {
3678 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3679 	u64 val64;
3680 	int i, msix_index;
3681 
3682 	if (nic->device_type == XFRAME_I_DEVICE)
3683 		return;
3684 
3685 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3686 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3687 		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3688 		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3689 		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3690 		writeq(val64, &bar0->xmsi_access);
3691 		if (wait_for_msix_trans(nic, msix_index)) {
3692 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3693 				  __func__, msix_index);
3694 			continue;
3695 		}
3696 	}
3697 }
3698 
3699 static void store_xmsi_data(struct s2io_nic *nic)
3700 {
3701 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3702 	u64 val64, addr, data;
3703 	int i, msix_index;
3704 
3705 	if (nic->device_type == XFRAME_I_DEVICE)
3706 		return;
3707 
3708 	/* Store and display */
3709 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3710 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3711 		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3712 		writeq(val64, &bar0->xmsi_access);
3713 		if (wait_for_msix_trans(nic, msix_index)) {
3714 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3715 				  __func__, msix_index);
3716 			continue;
3717 		}
3718 		addr = readq(&bar0->xmsi_address);
3719 		data = readq(&bar0->xmsi_data);
3720 		if (addr && data) {
3721 			nic->msix_info[i].addr = addr;
3722 			nic->msix_info[i].data = data;
3723 		}
3724 	}
3725 }
3726 
3727 static int s2io_enable_msi_x(struct s2io_nic *nic)
3728 {
3729 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3730 	u64 rx_mat;
3731 	u16 msi_control; /* Temp variable */
3732 	int ret, i, j, msix_indx = 1;
3733 	int size;
3734 	struct stat_block *stats = nic->mac_control.stats_info;
3735 	struct swStat *swstats = &stats->sw_stat;
3736 
3737 	size = nic->num_entries * sizeof(struct msix_entry);
3738 	nic->entries = kzalloc(size, GFP_KERNEL);
3739 	if (!nic->entries) {
3740 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3741 			  __func__);
3742 		swstats->mem_alloc_fail_cnt++;
3743 		return -ENOMEM;
3744 	}
3745 	swstats->mem_allocated += size;
3746 
3747 	size = nic->num_entries * sizeof(struct s2io_msix_entry);
3748 	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3749 	if (!nic->s2io_entries) {
3750 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3751 			  __func__);
3752 		swstats->mem_alloc_fail_cnt++;
3753 		kfree(nic->entries);
3754 		swstats->mem_freed
3755 			+= (nic->num_entries * sizeof(struct msix_entry));
3756 		return -ENOMEM;
3757 	}
3758 	swstats->mem_allocated += size;
3759 
3760 	nic->entries[0].entry = 0;
3761 	nic->s2io_entries[0].entry = 0;
3762 	nic->s2io_entries[0].in_use = MSIX_FLG;
3763 	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3764 	nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3765 
3766 	for (i = 1; i < nic->num_entries; i++) {
3767 		nic->entries[i].entry = ((i - 1) * 8) + 1;
3768 		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3769 		nic->s2io_entries[i].arg = NULL;
3770 		nic->s2io_entries[i].in_use = 0;
3771 	}
3772 
3773 	rx_mat = readq(&bar0->rx_mat);
3774 	for (j = 0; j < nic->config.rx_ring_num; j++) {
3775 		rx_mat |= RX_MAT_SET(j, msix_indx);
3776 		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3777 		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3778 		nic->s2io_entries[j+1].in_use = MSIX_FLG;
3779 		msix_indx += 8;
3780 	}
3781 	writeq(rx_mat, &bar0->rx_mat);
3782 	readq(&bar0->rx_mat);
3783 
3784 	ret = pci_enable_msix_range(nic->pdev, nic->entries,
3785 				    nic->num_entries, nic->num_entries);
3786 	/* We fail init if error or we get less vectors than min required */
3787 	if (ret < 0) {
3788 		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3789 		kfree(nic->entries);
3790 		swstats->mem_freed += nic->num_entries *
3791 			sizeof(struct msix_entry);
3792 		kfree(nic->s2io_entries);
3793 		swstats->mem_freed += nic->num_entries *
3794 			sizeof(struct s2io_msix_entry);
3795 		nic->entries = NULL;
3796 		nic->s2io_entries = NULL;
3797 		return -ENOMEM;
3798 	}
3799 
3800 	/*
3801 	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3802 	 * in the herc NIC. (Temp change, needs to be removed later)
3803 	 */
3804 	pci_read_config_word(nic->pdev, 0x42, &msi_control);
3805 	msi_control |= 0x1; /* Enable MSI */
3806 	pci_write_config_word(nic->pdev, 0x42, msi_control);
3807 
3808 	return 0;
3809 }
3810 
3811 /* Handle software interrupt used during MSI(X) test */
3812 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3813 {
3814 	struct s2io_nic *sp = dev_id;
3815 
3816 	sp->msi_detected = 1;
3817 	wake_up(&sp->msi_wait);
3818 
3819 	return IRQ_HANDLED;
3820 }
3821 
3822 /* Test interrupt path by forcing a a software IRQ */
3823 static int s2io_test_msi(struct s2io_nic *sp)
3824 {
3825 	struct pci_dev *pdev = sp->pdev;
3826 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3827 	int err;
3828 	u64 val64, saved64;
3829 
3830 	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3831 			  sp->name, sp);
3832 	if (err) {
3833 		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3834 			  sp->dev->name, pci_name(pdev), pdev->irq);
3835 		return err;
3836 	}
3837 
3838 	init_waitqueue_head(&sp->msi_wait);
3839 	sp->msi_detected = 0;
3840 
3841 	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3842 	val64 |= SCHED_INT_CTRL_ONE_SHOT;
3843 	val64 |= SCHED_INT_CTRL_TIMER_EN;
3844 	val64 |= SCHED_INT_CTRL_INT2MSI(1);
3845 	writeq(val64, &bar0->scheduled_int_ctrl);
3846 
3847 	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3848 
3849 	if (!sp->msi_detected) {
3850 		/* MSI(X) test failed, go back to INTx mode */
3851 		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3852 			  "using MSI(X) during test\n",
3853 			  sp->dev->name, pci_name(pdev));
3854 
3855 		err = -EOPNOTSUPP;
3856 	}
3857 
3858 	free_irq(sp->entries[1].vector, sp);
3859 
3860 	writeq(saved64, &bar0->scheduled_int_ctrl);
3861 
3862 	return err;
3863 }
3864 
3865 static void remove_msix_isr(struct s2io_nic *sp)
3866 {
3867 	int i;
3868 	u16 msi_control;
3869 
3870 	for (i = 0; i < sp->num_entries; i++) {
3871 		if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3872 			int vector = sp->entries[i].vector;
3873 			void *arg = sp->s2io_entries[i].arg;
3874 			free_irq(vector, arg);
3875 		}
3876 	}
3877 
3878 	kfree(sp->entries);
3879 	kfree(sp->s2io_entries);
3880 	sp->entries = NULL;
3881 	sp->s2io_entries = NULL;
3882 
3883 	pci_read_config_word(sp->pdev, 0x42, &msi_control);
3884 	msi_control &= 0xFFFE; /* Disable MSI */
3885 	pci_write_config_word(sp->pdev, 0x42, msi_control);
3886 
3887 	pci_disable_msix(sp->pdev);
3888 }
3889 
3890 static void remove_inta_isr(struct s2io_nic *sp)
3891 {
3892 	free_irq(sp->pdev->irq, sp->dev);
3893 }
3894 
3895 /* ********************************************************* *
3896  * Functions defined below concern the OS part of the driver *
3897  * ********************************************************* */
3898 
3899 /**
3900  *  s2io_open - open entry point of the driver
3901  *  @dev : pointer to the device structure.
3902  *  Description:
3903  *  This function is the open entry point of the driver. It mainly calls a
3904  *  function to allocate Rx buffers and inserts them into the buffer
3905  *  descriptors and then enables the Rx part of the NIC.
3906  *  Return value:
3907  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3908  *   file on failure.
3909  */
3910 
3911 static int s2io_open(struct net_device *dev)
3912 {
3913 	struct s2io_nic *sp = netdev_priv(dev);
3914 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3915 	int err = 0;
3916 
3917 	/*
3918 	 * Make sure you have link off by default every time
3919 	 * Nic is initialized
3920 	 */
3921 	netif_carrier_off(dev);
3922 	sp->last_link_state = 0;
3923 
3924 	/* Initialize H/W and enable interrupts */
3925 	err = s2io_card_up(sp);
3926 	if (err) {
3927 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3928 			  dev->name);
3929 		goto hw_init_failed;
3930 	}
3931 
3932 	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3933 		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3934 		s2io_card_down(sp);
3935 		err = -ENODEV;
3936 		goto hw_init_failed;
3937 	}
3938 	s2io_start_all_tx_queue(sp);
3939 	return 0;
3940 
3941 hw_init_failed:
3942 	if (sp->config.intr_type == MSI_X) {
3943 		if (sp->entries) {
3944 			kfree(sp->entries);
3945 			swstats->mem_freed += sp->num_entries *
3946 				sizeof(struct msix_entry);
3947 		}
3948 		if (sp->s2io_entries) {
3949 			kfree(sp->s2io_entries);
3950 			swstats->mem_freed += sp->num_entries *
3951 				sizeof(struct s2io_msix_entry);
3952 		}
3953 	}
3954 	return err;
3955 }
3956 
3957 /**
3958  *  s2io_close -close entry point of the driver
3959  *  @dev : device pointer.
3960  *  Description:
3961  *  This is the stop entry point of the driver. It needs to undo exactly
3962  *  whatever was done by the open entry point,thus it's usually referred to
3963  *  as the close function.Among other things this function mainly stops the
3964  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3965  *  Return value:
3966  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3967  *  file on failure.
3968  */
3969 
3970 static int s2io_close(struct net_device *dev)
3971 {
3972 	struct s2io_nic *sp = netdev_priv(dev);
3973 	struct config_param *config = &sp->config;
3974 	u64 tmp64;
3975 	int offset;
3976 
3977 	/* Return if the device is already closed               *
3978 	 *  Can happen when s2io_card_up failed in change_mtu    *
3979 	 */
3980 	if (!is_s2io_card_up(sp))
3981 		return 0;
3982 
3983 	s2io_stop_all_tx_queue(sp);
3984 	/* delete all populated mac entries */
3985 	for (offset = 1; offset < config->max_mc_addr; offset++) {
3986 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
3987 		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3988 			do_s2io_delete_unicast_mc(sp, tmp64);
3989 	}
3990 
3991 	s2io_card_down(sp);
3992 
3993 	return 0;
3994 }
3995 
3996 /**
3997  *  s2io_xmit - Tx entry point of te driver
3998  *  @skb : the socket buffer containing the Tx data.
3999  *  @dev : device pointer.
4000  *  Description :
4001  *  This function is the Tx entry point of the driver. S2IO NIC supports
4002  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
4003  *  NOTE: when device can't queue the pkt,just the trans_start variable will
4004  *  not be upadted.
4005  *  Return value:
4006  *  0 on success & 1 on failure.
4007  */
4008 
4009 static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4010 {
4011 	struct s2io_nic *sp = netdev_priv(dev);
4012 	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4013 	register u64 val64;
4014 	struct TxD *txdp;
4015 	struct TxFIFO_element __iomem *tx_fifo;
4016 	unsigned long flags = 0;
4017 	u16 vlan_tag = 0;
4018 	struct fifo_info *fifo = NULL;
4019 	int offload_type;
4020 	int enable_per_list_interrupt = 0;
4021 	struct config_param *config = &sp->config;
4022 	struct mac_info *mac_control = &sp->mac_control;
4023 	struct stat_block *stats = mac_control->stats_info;
4024 	struct swStat *swstats = &stats->sw_stat;
4025 
4026 	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4027 
4028 	if (unlikely(skb->len <= 0)) {
4029 		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4030 		dev_kfree_skb_any(skb);
4031 		return NETDEV_TX_OK;
4032 	}
4033 
4034 	if (!is_s2io_card_up(sp)) {
4035 		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4036 			  dev->name);
4037 		dev_kfree_skb_any(skb);
4038 		return NETDEV_TX_OK;
4039 	}
4040 
4041 	queue = 0;
4042 	if (skb_vlan_tag_present(skb))
4043 		vlan_tag = skb_vlan_tag_get(skb);
4044 	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4045 		if (skb->protocol == htons(ETH_P_IP)) {
4046 			struct iphdr *ip;
4047 			struct tcphdr *th;
4048 			ip = ip_hdr(skb);
4049 
4050 			if (!ip_is_fragment(ip)) {
4051 				th = (struct tcphdr *)(((unsigned char *)ip) +
4052 						       ip->ihl*4);
4053 
4054 				if (ip->protocol == IPPROTO_TCP) {
4055 					queue_len = sp->total_tcp_fifos;
4056 					queue = (ntohs(th->source) +
4057 						 ntohs(th->dest)) &
4058 						sp->fifo_selector[queue_len - 1];
4059 					if (queue >= queue_len)
4060 						queue = queue_len - 1;
4061 				} else if (ip->protocol == IPPROTO_UDP) {
4062 					queue_len = sp->total_udp_fifos;
4063 					queue = (ntohs(th->source) +
4064 						 ntohs(th->dest)) &
4065 						sp->fifo_selector[queue_len - 1];
4066 					if (queue >= queue_len)
4067 						queue = queue_len - 1;
4068 					queue += sp->udp_fifo_idx;
4069 					if (skb->len > 1024)
4070 						enable_per_list_interrupt = 1;
4071 				}
4072 			}
4073 		}
4074 	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4075 		/* get fifo number based on skb->priority value */
4076 		queue = config->fifo_mapping
4077 			[skb->priority & (MAX_TX_FIFOS - 1)];
4078 	fifo = &mac_control->fifos[queue];
4079 
4080 	spin_lock_irqsave(&fifo->tx_lock, flags);
4081 
4082 	if (sp->config.multiq) {
4083 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4084 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4085 			return NETDEV_TX_BUSY;
4086 		}
4087 	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4088 		if (netif_queue_stopped(dev)) {
4089 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4090 			return NETDEV_TX_BUSY;
4091 		}
4092 	}
4093 
4094 	put_off = (u16)fifo->tx_curr_put_info.offset;
4095 	get_off = (u16)fifo->tx_curr_get_info.offset;
4096 	txdp = fifo->list_info[put_off].list_virt_addr;
4097 
4098 	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4099 	/* Avoid "put" pointer going beyond "get" pointer */
4100 	if (txdp->Host_Control ||
4101 	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4102 		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4103 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4104 		dev_kfree_skb_any(skb);
4105 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
4106 		return NETDEV_TX_OK;
4107 	}
4108 
4109 	offload_type = s2io_offload_type(skb);
4110 	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4111 		txdp->Control_1 |= TXD_TCP_LSO_EN;
4112 		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4113 	}
4114 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
4115 		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4116 				    TXD_TX_CKO_TCP_EN |
4117 				    TXD_TX_CKO_UDP_EN);
4118 	}
4119 	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4120 	txdp->Control_1 |= TXD_LIST_OWN_XENA;
4121 	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4122 	if (enable_per_list_interrupt)
4123 		if (put_off & (queue_len >> 5))
4124 			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4125 	if (vlan_tag) {
4126 		txdp->Control_2 |= TXD_VLAN_ENABLE;
4127 		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4128 	}
4129 
4130 	frg_len = skb_headlen(skb);
4131 	if (offload_type == SKB_GSO_UDP) {
4132 		int ufo_size;
4133 
4134 		ufo_size = s2io_udp_mss(skb);
4135 		ufo_size &= ~7;
4136 		txdp->Control_1 |= TXD_UFO_EN;
4137 		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4138 		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4139 #ifdef __BIG_ENDIAN
4140 		/* both variants do cpu_to_be64(be32_to_cpu(...)) */
4141 		fifo->ufo_in_band_v[put_off] =
4142 			(__force u64)skb_shinfo(skb)->ip6_frag_id;
4143 #else
4144 		fifo->ufo_in_band_v[put_off] =
4145 			(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4146 #endif
4147 		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4148 		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4149 						      fifo->ufo_in_band_v,
4150 						      sizeof(u64),
4151 						      PCI_DMA_TODEVICE);
4152 		if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4153 			goto pci_map_failed;
4154 		txdp++;
4155 	}
4156 
4157 	txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4158 					      frg_len, PCI_DMA_TODEVICE);
4159 	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4160 		goto pci_map_failed;
4161 
4162 	txdp->Host_Control = (unsigned long)skb;
4163 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4164 	if (offload_type == SKB_GSO_UDP)
4165 		txdp->Control_1 |= TXD_UFO_EN;
4166 
4167 	frg_cnt = skb_shinfo(skb)->nr_frags;
4168 	/* For fragmented SKB. */
4169 	for (i = 0; i < frg_cnt; i++) {
4170 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4171 		/* A '0' length fragment will be ignored */
4172 		if (!skb_frag_size(frag))
4173 			continue;
4174 		txdp++;
4175 		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4176 							     frag, 0,
4177 							     skb_frag_size(frag),
4178 							     DMA_TO_DEVICE);
4179 		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4180 		if (offload_type == SKB_GSO_UDP)
4181 			txdp->Control_1 |= TXD_UFO_EN;
4182 	}
4183 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4184 
4185 	if (offload_type == SKB_GSO_UDP)
4186 		frg_cnt++; /* as Txd0 was used for inband header */
4187 
4188 	tx_fifo = mac_control->tx_FIFO_start[queue];
4189 	val64 = fifo->list_info[put_off].list_phy_addr;
4190 	writeq(val64, &tx_fifo->TxDL_Pointer);
4191 
4192 	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4193 		 TX_FIFO_LAST_LIST);
4194 	if (offload_type)
4195 		val64 |= TX_FIFO_SPECIAL_FUNC;
4196 
4197 	writeq(val64, &tx_fifo->List_Control);
4198 
4199 	mmiowb();
4200 
4201 	put_off++;
4202 	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4203 		put_off = 0;
4204 	fifo->tx_curr_put_info.offset = put_off;
4205 
4206 	/* Avoid "put" pointer going beyond "get" pointer */
4207 	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4208 		swstats->fifo_full_cnt++;
4209 		DBG_PRINT(TX_DBG,
4210 			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4211 			  put_off, get_off);
4212 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4213 	}
4214 	swstats->mem_allocated += skb->truesize;
4215 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4216 
4217 	if (sp->config.intr_type == MSI_X)
4218 		tx_intr_handler(fifo);
4219 
4220 	return NETDEV_TX_OK;
4221 
4222 pci_map_failed:
4223 	swstats->pci_map_fail_cnt++;
4224 	s2io_stop_tx_queue(sp, fifo->fifo_no);
4225 	swstats->mem_freed += skb->truesize;
4226 	dev_kfree_skb_any(skb);
4227 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4228 	return NETDEV_TX_OK;
4229 }
4230 
4231 static void
4232 s2io_alarm_handle(unsigned long data)
4233 {
4234 	struct s2io_nic *sp = (struct s2io_nic *)data;
4235 	struct net_device *dev = sp->dev;
4236 
4237 	s2io_handle_errors(dev);
4238 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4239 }
4240 
4241 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4242 {
4243 	struct ring_info *ring = (struct ring_info *)dev_id;
4244 	struct s2io_nic *sp = ring->nic;
4245 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4246 
4247 	if (unlikely(!is_s2io_card_up(sp)))
4248 		return IRQ_HANDLED;
4249 
4250 	if (sp->config.napi) {
4251 		u8 __iomem *addr = NULL;
4252 		u8 val8 = 0;
4253 
4254 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4255 		addr += (7 - ring->ring_no);
4256 		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4257 		writeb(val8, addr);
4258 		val8 = readb(addr);
4259 		napi_schedule(&ring->napi);
4260 	} else {
4261 		rx_intr_handler(ring, 0);
4262 		s2io_chk_rx_buffers(sp, ring);
4263 	}
4264 
4265 	return IRQ_HANDLED;
4266 }
4267 
4268 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4269 {
4270 	int i;
4271 	struct fifo_info *fifos = (struct fifo_info *)dev_id;
4272 	struct s2io_nic *sp = fifos->nic;
4273 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4274 	struct config_param *config  = &sp->config;
4275 	u64 reason;
4276 
4277 	if (unlikely(!is_s2io_card_up(sp)))
4278 		return IRQ_NONE;
4279 
4280 	reason = readq(&bar0->general_int_status);
4281 	if (unlikely(reason == S2IO_MINUS_ONE))
4282 		/* Nothing much can be done. Get out */
4283 		return IRQ_HANDLED;
4284 
4285 	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4286 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4287 
4288 		if (reason & GEN_INTR_TXPIC)
4289 			s2io_txpic_intr_handle(sp);
4290 
4291 		if (reason & GEN_INTR_TXTRAFFIC)
4292 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4293 
4294 		for (i = 0; i < config->tx_fifo_num; i++)
4295 			tx_intr_handler(&fifos[i]);
4296 
4297 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4298 		readl(&bar0->general_int_status);
4299 		return IRQ_HANDLED;
4300 	}
4301 	/* The interrupt was not raised by us */
4302 	return IRQ_NONE;
4303 }
4304 
4305 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4306 {
4307 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4308 	u64 val64;
4309 
4310 	val64 = readq(&bar0->pic_int_status);
4311 	if (val64 & PIC_INT_GPIO) {
4312 		val64 = readq(&bar0->gpio_int_reg);
4313 		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4314 		    (val64 & GPIO_INT_REG_LINK_UP)) {
4315 			/*
4316 			 * This is unstable state so clear both up/down
4317 			 * interrupt and adapter to re-evaluate the link state.
4318 			 */
4319 			val64 |= GPIO_INT_REG_LINK_DOWN;
4320 			val64 |= GPIO_INT_REG_LINK_UP;
4321 			writeq(val64, &bar0->gpio_int_reg);
4322 			val64 = readq(&bar0->gpio_int_mask);
4323 			val64 &= ~(GPIO_INT_MASK_LINK_UP |
4324 				   GPIO_INT_MASK_LINK_DOWN);
4325 			writeq(val64, &bar0->gpio_int_mask);
4326 		} else if (val64 & GPIO_INT_REG_LINK_UP) {
4327 			val64 = readq(&bar0->adapter_status);
4328 			/* Enable Adapter */
4329 			val64 = readq(&bar0->adapter_control);
4330 			val64 |= ADAPTER_CNTL_EN;
4331 			writeq(val64, &bar0->adapter_control);
4332 			val64 |= ADAPTER_LED_ON;
4333 			writeq(val64, &bar0->adapter_control);
4334 			if (!sp->device_enabled_once)
4335 				sp->device_enabled_once = 1;
4336 
4337 			s2io_link(sp, LINK_UP);
4338 			/*
4339 			 * unmask link down interrupt and mask link-up
4340 			 * intr
4341 			 */
4342 			val64 = readq(&bar0->gpio_int_mask);
4343 			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4344 			val64 |= GPIO_INT_MASK_LINK_UP;
4345 			writeq(val64, &bar0->gpio_int_mask);
4346 
4347 		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4348 			val64 = readq(&bar0->adapter_status);
4349 			s2io_link(sp, LINK_DOWN);
4350 			/* Link is down so unmaks link up interrupt */
4351 			val64 = readq(&bar0->gpio_int_mask);
4352 			val64 &= ~GPIO_INT_MASK_LINK_UP;
4353 			val64 |= GPIO_INT_MASK_LINK_DOWN;
4354 			writeq(val64, &bar0->gpio_int_mask);
4355 
4356 			/* turn off LED */
4357 			val64 = readq(&bar0->adapter_control);
4358 			val64 = val64 & (~ADAPTER_LED_ON);
4359 			writeq(val64, &bar0->adapter_control);
4360 		}
4361 	}
4362 	val64 = readq(&bar0->gpio_int_mask);
4363 }
4364 
4365 /**
4366  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4367  *  @value: alarm bits
4368  *  @addr: address value
4369  *  @cnt: counter variable
4370  *  Description: Check for alarm and increment the counter
4371  *  Return Value:
4372  *  1 - if alarm bit set
4373  *  0 - if alarm bit is not set
4374  */
4375 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4376 				 unsigned long long *cnt)
4377 {
4378 	u64 val64;
4379 	val64 = readq(addr);
4380 	if (val64 & value) {
4381 		writeq(val64, addr);
4382 		(*cnt)++;
4383 		return 1;
4384 	}
4385 	return 0;
4386 
4387 }
4388 
4389 /**
4390  *  s2io_handle_errors - Xframe error indication handler
4391  *  @nic: device private variable
4392  *  Description: Handle alarms such as loss of link, single or
4393  *  double ECC errors, critical and serious errors.
4394  *  Return Value:
4395  *  NONE
4396  */
4397 static void s2io_handle_errors(void *dev_id)
4398 {
4399 	struct net_device *dev = (struct net_device *)dev_id;
4400 	struct s2io_nic *sp = netdev_priv(dev);
4401 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4402 	u64 temp64 = 0, val64 = 0;
4403 	int i = 0;
4404 
4405 	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4406 	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4407 
4408 	if (!is_s2io_card_up(sp))
4409 		return;
4410 
4411 	if (pci_channel_offline(sp->pdev))
4412 		return;
4413 
4414 	memset(&sw_stat->ring_full_cnt, 0,
4415 	       sizeof(sw_stat->ring_full_cnt));
4416 
4417 	/* Handling the XPAK counters update */
4418 	if (stats->xpak_timer_count < 72000) {
4419 		/* waiting for an hour */
4420 		stats->xpak_timer_count++;
4421 	} else {
4422 		s2io_updt_xpak_counter(dev);
4423 		/* reset the count to zero */
4424 		stats->xpak_timer_count = 0;
4425 	}
4426 
4427 	/* Handling link status change error Intr */
4428 	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4429 		val64 = readq(&bar0->mac_rmac_err_reg);
4430 		writeq(val64, &bar0->mac_rmac_err_reg);
4431 		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4432 			schedule_work(&sp->set_link_task);
4433 	}
4434 
4435 	/* In case of a serious error, the device will be Reset. */
4436 	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4437 				  &sw_stat->serious_err_cnt))
4438 		goto reset;
4439 
4440 	/* Check for data parity error */
4441 	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4442 				  &sw_stat->parity_err_cnt))
4443 		goto reset;
4444 
4445 	/* Check for ring full counter */
4446 	if (sp->device_type == XFRAME_II_DEVICE) {
4447 		val64 = readq(&bar0->ring_bump_counter1);
4448 		for (i = 0; i < 4; i++) {
4449 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4450 			temp64 >>= 64 - ((i+1)*16);
4451 			sw_stat->ring_full_cnt[i] += temp64;
4452 		}
4453 
4454 		val64 = readq(&bar0->ring_bump_counter2);
4455 		for (i = 0; i < 4; i++) {
4456 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4457 			temp64 >>= 64 - ((i+1)*16);
4458 			sw_stat->ring_full_cnt[i+4] += temp64;
4459 		}
4460 	}
4461 
4462 	val64 = readq(&bar0->txdma_int_status);
4463 	/*check for pfc_err*/
4464 	if (val64 & TXDMA_PFC_INT) {
4465 		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4466 					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4467 					  PFC_PCIX_ERR,
4468 					  &bar0->pfc_err_reg,
4469 					  &sw_stat->pfc_err_cnt))
4470 			goto reset;
4471 		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4472 				      &bar0->pfc_err_reg,
4473 				      &sw_stat->pfc_err_cnt);
4474 	}
4475 
4476 	/*check for tda_err*/
4477 	if (val64 & TXDMA_TDA_INT) {
4478 		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4479 					  TDA_SM0_ERR_ALARM |
4480 					  TDA_SM1_ERR_ALARM,
4481 					  &bar0->tda_err_reg,
4482 					  &sw_stat->tda_err_cnt))
4483 			goto reset;
4484 		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4485 				      &bar0->tda_err_reg,
4486 				      &sw_stat->tda_err_cnt);
4487 	}
4488 	/*check for pcc_err*/
4489 	if (val64 & TXDMA_PCC_INT) {
4490 		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4491 					  PCC_N_SERR | PCC_6_COF_OV_ERR |
4492 					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4493 					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4494 					  PCC_TXB_ECC_DB_ERR,
4495 					  &bar0->pcc_err_reg,
4496 					  &sw_stat->pcc_err_cnt))
4497 			goto reset;
4498 		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4499 				      &bar0->pcc_err_reg,
4500 				      &sw_stat->pcc_err_cnt);
4501 	}
4502 
4503 	/*check for tti_err*/
4504 	if (val64 & TXDMA_TTI_INT) {
4505 		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4506 					  &bar0->tti_err_reg,
4507 					  &sw_stat->tti_err_cnt))
4508 			goto reset;
4509 		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4510 				      &bar0->tti_err_reg,
4511 				      &sw_stat->tti_err_cnt);
4512 	}
4513 
4514 	/*check for lso_err*/
4515 	if (val64 & TXDMA_LSO_INT) {
4516 		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4517 					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4518 					  &bar0->lso_err_reg,
4519 					  &sw_stat->lso_err_cnt))
4520 			goto reset;
4521 		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4522 				      &bar0->lso_err_reg,
4523 				      &sw_stat->lso_err_cnt);
4524 	}
4525 
4526 	/*check for tpa_err*/
4527 	if (val64 & TXDMA_TPA_INT) {
4528 		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4529 					  &bar0->tpa_err_reg,
4530 					  &sw_stat->tpa_err_cnt))
4531 			goto reset;
4532 		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4533 				      &bar0->tpa_err_reg,
4534 				      &sw_stat->tpa_err_cnt);
4535 	}
4536 
4537 	/*check for sm_err*/
4538 	if (val64 & TXDMA_SM_INT) {
4539 		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4540 					  &bar0->sm_err_reg,
4541 					  &sw_stat->sm_err_cnt))
4542 			goto reset;
4543 	}
4544 
4545 	val64 = readq(&bar0->mac_int_status);
4546 	if (val64 & MAC_INT_STATUS_TMAC_INT) {
4547 		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4548 					  &bar0->mac_tmac_err_reg,
4549 					  &sw_stat->mac_tmac_err_cnt))
4550 			goto reset;
4551 		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4552 				      TMAC_DESC_ECC_SG_ERR |
4553 				      TMAC_DESC_ECC_DB_ERR,
4554 				      &bar0->mac_tmac_err_reg,
4555 				      &sw_stat->mac_tmac_err_cnt);
4556 	}
4557 
4558 	val64 = readq(&bar0->xgxs_int_status);
4559 	if (val64 & XGXS_INT_STATUS_TXGXS) {
4560 		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4561 					  &bar0->xgxs_txgxs_err_reg,
4562 					  &sw_stat->xgxs_txgxs_err_cnt))
4563 			goto reset;
4564 		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4565 				      &bar0->xgxs_txgxs_err_reg,
4566 				      &sw_stat->xgxs_txgxs_err_cnt);
4567 	}
4568 
4569 	val64 = readq(&bar0->rxdma_int_status);
4570 	if (val64 & RXDMA_INT_RC_INT_M) {
4571 		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4572 					  RC_FTC_ECC_DB_ERR |
4573 					  RC_PRCn_SM_ERR_ALARM |
4574 					  RC_FTC_SM_ERR_ALARM,
4575 					  &bar0->rc_err_reg,
4576 					  &sw_stat->rc_err_cnt))
4577 			goto reset;
4578 		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4579 				      RC_FTC_ECC_SG_ERR |
4580 				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4581 				      &sw_stat->rc_err_cnt);
4582 		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4583 					  PRC_PCI_AB_WR_Rn |
4584 					  PRC_PCI_AB_F_WR_Rn,
4585 					  &bar0->prc_pcix_err_reg,
4586 					  &sw_stat->prc_pcix_err_cnt))
4587 			goto reset;
4588 		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4589 				      PRC_PCI_DP_WR_Rn |
4590 				      PRC_PCI_DP_F_WR_Rn,
4591 				      &bar0->prc_pcix_err_reg,
4592 				      &sw_stat->prc_pcix_err_cnt);
4593 	}
4594 
4595 	if (val64 & RXDMA_INT_RPA_INT_M) {
4596 		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4597 					  &bar0->rpa_err_reg,
4598 					  &sw_stat->rpa_err_cnt))
4599 			goto reset;
4600 		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4601 				      &bar0->rpa_err_reg,
4602 				      &sw_stat->rpa_err_cnt);
4603 	}
4604 
4605 	if (val64 & RXDMA_INT_RDA_INT_M) {
4606 		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4607 					  RDA_FRM_ECC_DB_N_AERR |
4608 					  RDA_SM1_ERR_ALARM |
4609 					  RDA_SM0_ERR_ALARM |
4610 					  RDA_RXD_ECC_DB_SERR,
4611 					  &bar0->rda_err_reg,
4612 					  &sw_stat->rda_err_cnt))
4613 			goto reset;
4614 		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4615 				      RDA_FRM_ECC_SG_ERR |
4616 				      RDA_MISC_ERR |
4617 				      RDA_PCIX_ERR,
4618 				      &bar0->rda_err_reg,
4619 				      &sw_stat->rda_err_cnt);
4620 	}
4621 
4622 	if (val64 & RXDMA_INT_RTI_INT_M) {
4623 		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4624 					  &bar0->rti_err_reg,
4625 					  &sw_stat->rti_err_cnt))
4626 			goto reset;
4627 		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4628 				      &bar0->rti_err_reg,
4629 				      &sw_stat->rti_err_cnt);
4630 	}
4631 
4632 	val64 = readq(&bar0->mac_int_status);
4633 	if (val64 & MAC_INT_STATUS_RMAC_INT) {
4634 		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4635 					  &bar0->mac_rmac_err_reg,
4636 					  &sw_stat->mac_rmac_err_cnt))
4637 			goto reset;
4638 		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4639 				      RMAC_SINGLE_ECC_ERR |
4640 				      RMAC_DOUBLE_ECC_ERR,
4641 				      &bar0->mac_rmac_err_reg,
4642 				      &sw_stat->mac_rmac_err_cnt);
4643 	}
4644 
4645 	val64 = readq(&bar0->xgxs_int_status);
4646 	if (val64 & XGXS_INT_STATUS_RXGXS) {
4647 		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4648 					  &bar0->xgxs_rxgxs_err_reg,
4649 					  &sw_stat->xgxs_rxgxs_err_cnt))
4650 			goto reset;
4651 	}
4652 
4653 	val64 = readq(&bar0->mc_int_status);
4654 	if (val64 & MC_INT_STATUS_MC_INT) {
4655 		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4656 					  &bar0->mc_err_reg,
4657 					  &sw_stat->mc_err_cnt))
4658 			goto reset;
4659 
4660 		/* Handling Ecc errors */
4661 		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4662 			writeq(val64, &bar0->mc_err_reg);
4663 			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4664 				sw_stat->double_ecc_errs++;
4665 				if (sp->device_type != XFRAME_II_DEVICE) {
4666 					/*
4667 					 * Reset XframeI only if critical error
4668 					 */
4669 					if (val64 &
4670 					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4671 					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4672 						goto reset;
4673 				}
4674 			} else
4675 				sw_stat->single_ecc_errs++;
4676 		}
4677 	}
4678 	return;
4679 
4680 reset:
4681 	s2io_stop_all_tx_queue(sp);
4682 	schedule_work(&sp->rst_timer_task);
4683 	sw_stat->soft_reset_cnt++;
4684 }
4685 
4686 /**
4687  *  s2io_isr - ISR handler of the device .
4688  *  @irq: the irq of the device.
4689  *  @dev_id: a void pointer to the dev structure of the NIC.
4690  *  Description:  This function is the ISR handler of the device. It
4691  *  identifies the reason for the interrupt and calls the relevant
4692  *  service routines. As a contongency measure, this ISR allocates the
4693  *  recv buffers, if their numbers are below the panic value which is
4694  *  presently set to 25% of the original number of rcv buffers allocated.
4695  *  Return value:
4696  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4697  *   IRQ_NONE: will be returned if interrupt is not from our device
4698  */
4699 static irqreturn_t s2io_isr(int irq, void *dev_id)
4700 {
4701 	struct net_device *dev = (struct net_device *)dev_id;
4702 	struct s2io_nic *sp = netdev_priv(dev);
4703 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4704 	int i;
4705 	u64 reason = 0;
4706 	struct mac_info *mac_control;
4707 	struct config_param *config;
4708 
4709 	/* Pretend we handled any irq's from a disconnected card */
4710 	if (pci_channel_offline(sp->pdev))
4711 		return IRQ_NONE;
4712 
4713 	if (!is_s2io_card_up(sp))
4714 		return IRQ_NONE;
4715 
4716 	config = &sp->config;
4717 	mac_control = &sp->mac_control;
4718 
4719 	/*
4720 	 * Identify the cause for interrupt and call the appropriate
4721 	 * interrupt handler. Causes for the interrupt could be;
4722 	 * 1. Rx of packet.
4723 	 * 2. Tx complete.
4724 	 * 3. Link down.
4725 	 */
4726 	reason = readq(&bar0->general_int_status);
4727 
4728 	if (unlikely(reason == S2IO_MINUS_ONE))
4729 		return IRQ_HANDLED;	/* Nothing much can be done. Get out */
4730 
4731 	if (reason &
4732 	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4733 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4734 
4735 		if (config->napi) {
4736 			if (reason & GEN_INTR_RXTRAFFIC) {
4737 				napi_schedule(&sp->napi);
4738 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4739 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4740 				readl(&bar0->rx_traffic_int);
4741 			}
4742 		} else {
4743 			/*
4744 			 * rx_traffic_int reg is an R1 register, writing all 1's
4745 			 * will ensure that the actual interrupt causing bit
4746 			 * get's cleared and hence a read can be avoided.
4747 			 */
4748 			if (reason & GEN_INTR_RXTRAFFIC)
4749 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4750 
4751 			for (i = 0; i < config->rx_ring_num; i++) {
4752 				struct ring_info *ring = &mac_control->rings[i];
4753 
4754 				rx_intr_handler(ring, 0);
4755 			}
4756 		}
4757 
4758 		/*
4759 		 * tx_traffic_int reg is an R1 register, writing all 1's
4760 		 * will ensure that the actual interrupt causing bit get's
4761 		 * cleared and hence a read can be avoided.
4762 		 */
4763 		if (reason & GEN_INTR_TXTRAFFIC)
4764 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4765 
4766 		for (i = 0; i < config->tx_fifo_num; i++)
4767 			tx_intr_handler(&mac_control->fifos[i]);
4768 
4769 		if (reason & GEN_INTR_TXPIC)
4770 			s2io_txpic_intr_handle(sp);
4771 
4772 		/*
4773 		 * Reallocate the buffers from the interrupt handler itself.
4774 		 */
4775 		if (!config->napi) {
4776 			for (i = 0; i < config->rx_ring_num; i++) {
4777 				struct ring_info *ring = &mac_control->rings[i];
4778 
4779 				s2io_chk_rx_buffers(sp, ring);
4780 			}
4781 		}
4782 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4783 		readl(&bar0->general_int_status);
4784 
4785 		return IRQ_HANDLED;
4786 
4787 	} else if (!reason) {
4788 		/* The interrupt was not raised by us */
4789 		return IRQ_NONE;
4790 	}
4791 
4792 	return IRQ_HANDLED;
4793 }
4794 
4795 /**
4796  * s2io_updt_stats -
4797  */
4798 static void s2io_updt_stats(struct s2io_nic *sp)
4799 {
4800 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4801 	u64 val64;
4802 	int cnt = 0;
4803 
4804 	if (is_s2io_card_up(sp)) {
4805 		/* Apprx 30us on a 133 MHz bus */
4806 		val64 = SET_UPDT_CLICKS(10) |
4807 			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4808 		writeq(val64, &bar0->stat_cfg);
4809 		do {
4810 			udelay(100);
4811 			val64 = readq(&bar0->stat_cfg);
4812 			if (!(val64 & s2BIT(0)))
4813 				break;
4814 			cnt++;
4815 			if (cnt == 5)
4816 				break; /* Updt failed */
4817 		} while (1);
4818 	}
4819 }
4820 
4821 /**
4822  *  s2io_get_stats - Updates the device statistics structure.
4823  *  @dev : pointer to the device structure.
4824  *  Description:
4825  *  This function updates the device statistics structure in the s2io_nic
4826  *  structure and returns a pointer to the same.
4827  *  Return value:
4828  *  pointer to the updated net_device_stats structure.
4829  */
4830 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4831 {
4832 	struct s2io_nic *sp = netdev_priv(dev);
4833 	struct mac_info *mac_control = &sp->mac_control;
4834 	struct stat_block *stats = mac_control->stats_info;
4835 	u64 delta;
4836 
4837 	/* Configure Stats for immediate updt */
4838 	s2io_updt_stats(sp);
4839 
4840 	/* A device reset will cause the on-adapter statistics to be zero'ed.
4841 	 * This can be done while running by changing the MTU.  To prevent the
4842 	 * system from having the stats zero'ed, the driver keeps a copy of the
4843 	 * last update to the system (which is also zero'ed on reset).  This
4844 	 * enables the driver to accurately know the delta between the last
4845 	 * update and the current update.
4846 	 */
4847 	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4848 		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4849 	sp->stats.rx_packets += delta;
4850 	dev->stats.rx_packets += delta;
4851 
4852 	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4853 		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4854 	sp->stats.tx_packets += delta;
4855 	dev->stats.tx_packets += delta;
4856 
4857 	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4858 		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4859 	sp->stats.rx_bytes += delta;
4860 	dev->stats.rx_bytes += delta;
4861 
4862 	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4863 		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4864 	sp->stats.tx_bytes += delta;
4865 	dev->stats.tx_bytes += delta;
4866 
4867 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4868 	sp->stats.rx_errors += delta;
4869 	dev->stats.rx_errors += delta;
4870 
4871 	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4872 		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4873 	sp->stats.tx_errors += delta;
4874 	dev->stats.tx_errors += delta;
4875 
4876 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4877 	sp->stats.rx_dropped += delta;
4878 	dev->stats.rx_dropped += delta;
4879 
4880 	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4881 	sp->stats.tx_dropped += delta;
4882 	dev->stats.tx_dropped += delta;
4883 
4884 	/* The adapter MAC interprets pause frames as multicast packets, but
4885 	 * does not pass them up.  This erroneously increases the multicast
4886 	 * packet count and needs to be deducted when the multicast frame count
4887 	 * is queried.
4888 	 */
4889 	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4890 		le32_to_cpu(stats->rmac_vld_mcst_frms);
4891 	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4892 	delta -= sp->stats.multicast;
4893 	sp->stats.multicast += delta;
4894 	dev->stats.multicast += delta;
4895 
4896 	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4897 		le32_to_cpu(stats->rmac_usized_frms)) +
4898 		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4899 	sp->stats.rx_length_errors += delta;
4900 	dev->stats.rx_length_errors += delta;
4901 
4902 	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4903 	sp->stats.rx_crc_errors += delta;
4904 	dev->stats.rx_crc_errors += delta;
4905 
4906 	return &dev->stats;
4907 }
4908 
4909 /**
4910  *  s2io_set_multicast - entry point for multicast address enable/disable.
4911  *  @dev : pointer to the device structure
4912  *  Description:
4913  *  This function is a driver entry point which gets called by the kernel
4914  *  whenever multicast addresses must be enabled/disabled. This also gets
4915  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4916  *  determine, if multicast address must be enabled or if promiscuous mode
4917  *  is to be disabled etc.
4918  *  Return value:
4919  *  void.
4920  */
4921 
4922 static void s2io_set_multicast(struct net_device *dev)
4923 {
4924 	int i, j, prev_cnt;
4925 	struct netdev_hw_addr *ha;
4926 	struct s2io_nic *sp = netdev_priv(dev);
4927 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4928 	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4929 		0xfeffffffffffULL;
4930 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4931 	void __iomem *add;
4932 	struct config_param *config = &sp->config;
4933 
4934 	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4935 		/*  Enable all Multicast addresses */
4936 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4937 		       &bar0->rmac_addr_data0_mem);
4938 		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4939 		       &bar0->rmac_addr_data1_mem);
4940 		val64 = RMAC_ADDR_CMD_MEM_WE |
4941 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4942 			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4943 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4944 		/* Wait till command completes */
4945 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4946 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4947 				      S2IO_BIT_RESET);
4948 
4949 		sp->m_cast_flg = 1;
4950 		sp->all_multi_pos = config->max_mc_addr - 1;
4951 	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4952 		/*  Disable all Multicast addresses */
4953 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4954 		       &bar0->rmac_addr_data0_mem);
4955 		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4956 		       &bar0->rmac_addr_data1_mem);
4957 		val64 = RMAC_ADDR_CMD_MEM_WE |
4958 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4959 			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4960 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4961 		/* Wait till command completes */
4962 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4963 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4964 				      S2IO_BIT_RESET);
4965 
4966 		sp->m_cast_flg = 0;
4967 		sp->all_multi_pos = 0;
4968 	}
4969 
4970 	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4971 		/*  Put the NIC into promiscuous mode */
4972 		add = &bar0->mac_cfg;
4973 		val64 = readq(&bar0->mac_cfg);
4974 		val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4975 
4976 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4977 		writel((u32)val64, add);
4978 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4979 		writel((u32) (val64 >> 32), (add + 4));
4980 
4981 		if (vlan_tag_strip != 1) {
4982 			val64 = readq(&bar0->rx_pa_cfg);
4983 			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4984 			writeq(val64, &bar0->rx_pa_cfg);
4985 			sp->vlan_strip_flag = 0;
4986 		}
4987 
4988 		val64 = readq(&bar0->mac_cfg);
4989 		sp->promisc_flg = 1;
4990 		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4991 			  dev->name);
4992 	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4993 		/*  Remove the NIC from promiscuous mode */
4994 		add = &bar0->mac_cfg;
4995 		val64 = readq(&bar0->mac_cfg);
4996 		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4997 
4998 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4999 		writel((u32)val64, add);
5000 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5001 		writel((u32) (val64 >> 32), (add + 4));
5002 
5003 		if (vlan_tag_strip != 0) {
5004 			val64 = readq(&bar0->rx_pa_cfg);
5005 			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5006 			writeq(val64, &bar0->rx_pa_cfg);
5007 			sp->vlan_strip_flag = 1;
5008 		}
5009 
5010 		val64 = readq(&bar0->mac_cfg);
5011 		sp->promisc_flg = 0;
5012 		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
5013 	}
5014 
5015 	/*  Update individual M_CAST address list */
5016 	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
5017 		if (netdev_mc_count(dev) >
5018 		    (config->max_mc_addr - config->max_mac_addr)) {
5019 			DBG_PRINT(ERR_DBG,
5020 				  "%s: No more Rx filters can be added - "
5021 				  "please enable ALL_MULTI instead\n",
5022 				  dev->name);
5023 			return;
5024 		}
5025 
5026 		prev_cnt = sp->mc_addr_count;
5027 		sp->mc_addr_count = netdev_mc_count(dev);
5028 
5029 		/* Clear out the previous list of Mc in the H/W. */
5030 		for (i = 0; i < prev_cnt; i++) {
5031 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5032 			       &bar0->rmac_addr_data0_mem);
5033 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5034 			       &bar0->rmac_addr_data1_mem);
5035 			val64 = RMAC_ADDR_CMD_MEM_WE |
5036 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5037 				RMAC_ADDR_CMD_MEM_OFFSET
5038 				(config->mc_start_offset + i);
5039 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5040 
5041 			/* Wait for command completes */
5042 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5043 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5044 						  S2IO_BIT_RESET)) {
5045 				DBG_PRINT(ERR_DBG,
5046 					  "%s: Adding Multicasts failed\n",
5047 					  dev->name);
5048 				return;
5049 			}
5050 		}
5051 
5052 		/* Create the new Rx filter list and update the same in H/W. */
5053 		i = 0;
5054 		netdev_for_each_mc_addr(ha, dev) {
5055 			mac_addr = 0;
5056 			for (j = 0; j < ETH_ALEN; j++) {
5057 				mac_addr |= ha->addr[j];
5058 				mac_addr <<= 8;
5059 			}
5060 			mac_addr >>= 8;
5061 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5062 			       &bar0->rmac_addr_data0_mem);
5063 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5064 			       &bar0->rmac_addr_data1_mem);
5065 			val64 = RMAC_ADDR_CMD_MEM_WE |
5066 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5067 				RMAC_ADDR_CMD_MEM_OFFSET
5068 				(i + config->mc_start_offset);
5069 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5070 
5071 			/* Wait for command completes */
5072 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5073 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5074 						  S2IO_BIT_RESET)) {
5075 				DBG_PRINT(ERR_DBG,
5076 					  "%s: Adding Multicasts failed\n",
5077 					  dev->name);
5078 				return;
5079 			}
5080 			i++;
5081 		}
5082 	}
5083 }
5084 
5085 /* read from CAM unicast & multicast addresses and store it in
5086  * def_mac_addr structure
5087  */
5088 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5089 {
5090 	int offset;
5091 	u64 mac_addr = 0x0;
5092 	struct config_param *config = &sp->config;
5093 
5094 	/* store unicast & multicast mac addresses */
5095 	for (offset = 0; offset < config->max_mc_addr; offset++) {
5096 		mac_addr = do_s2io_read_unicast_mc(sp, offset);
5097 		/* if read fails disable the entry */
5098 		if (mac_addr == FAILURE)
5099 			mac_addr = S2IO_DISABLE_MAC_ENTRY;
5100 		do_s2io_copy_mac_addr(sp, offset, mac_addr);
5101 	}
5102 }
5103 
5104 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5105 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5106 {
5107 	int offset;
5108 	struct config_param *config = &sp->config;
5109 	/* restore unicast mac address */
5110 	for (offset = 0; offset < config->max_mac_addr; offset++)
5111 		do_s2io_prog_unicast(sp->dev,
5112 				     sp->def_mac_addr[offset].mac_addr);
5113 
5114 	/* restore multicast mac address */
5115 	for (offset = config->mc_start_offset;
5116 	     offset < config->max_mc_addr; offset++)
5117 		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5118 }
5119 
5120 /* add a multicast MAC address to CAM */
5121 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5122 {
5123 	int i;
5124 	u64 mac_addr = 0;
5125 	struct config_param *config = &sp->config;
5126 
5127 	for (i = 0; i < ETH_ALEN; i++) {
5128 		mac_addr <<= 8;
5129 		mac_addr |= addr[i];
5130 	}
5131 	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5132 		return SUCCESS;
5133 
5134 	/* check if the multicast mac already preset in CAM */
5135 	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5136 		u64 tmp64;
5137 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5138 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5139 			break;
5140 
5141 		if (tmp64 == mac_addr)
5142 			return SUCCESS;
5143 	}
5144 	if (i == config->max_mc_addr) {
5145 		DBG_PRINT(ERR_DBG,
5146 			  "CAM full no space left for multicast MAC\n");
5147 		return FAILURE;
5148 	}
5149 	/* Update the internal structure with this new mac address */
5150 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5151 
5152 	return do_s2io_add_mac(sp, mac_addr, i);
5153 }
5154 
5155 /* add MAC address to CAM */
5156 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5157 {
5158 	u64 val64;
5159 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5160 
5161 	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5162 	       &bar0->rmac_addr_data0_mem);
5163 
5164 	val64 =	RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5165 		RMAC_ADDR_CMD_MEM_OFFSET(off);
5166 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5167 
5168 	/* Wait till command completes */
5169 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5170 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5171 				  S2IO_BIT_RESET)) {
5172 		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5173 		return FAILURE;
5174 	}
5175 	return SUCCESS;
5176 }
5177 /* deletes a specified unicast/multicast mac entry from CAM */
5178 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5179 {
5180 	int offset;
5181 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5182 	struct config_param *config = &sp->config;
5183 
5184 	for (offset = 1;
5185 	     offset < config->max_mc_addr; offset++) {
5186 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
5187 		if (tmp64 == addr) {
5188 			/* disable the entry by writing  0xffffffffffffULL */
5189 			if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5190 				return FAILURE;
5191 			/* store the new mac list from CAM */
5192 			do_s2io_store_unicast_mc(sp);
5193 			return SUCCESS;
5194 		}
5195 	}
5196 	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5197 		  (unsigned long long)addr);
5198 	return FAILURE;
5199 }
5200 
5201 /* read mac entries from CAM */
5202 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5203 {
5204 	u64 tmp64 = 0xffffffffffff0000ULL, val64;
5205 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5206 
5207 	/* read mac addr */
5208 	val64 =	RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5209 		RMAC_ADDR_CMD_MEM_OFFSET(offset);
5210 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5211 
5212 	/* Wait till command completes */
5213 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5214 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5215 				  S2IO_BIT_RESET)) {
5216 		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5217 		return FAILURE;
5218 	}
5219 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
5220 
5221 	return tmp64 >> 16;
5222 }
5223 
5224 /**
5225  * s2io_set_mac_addr - driver entry point
5226  */
5227 
5228 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5229 {
5230 	struct sockaddr *addr = p;
5231 
5232 	if (!is_valid_ether_addr(addr->sa_data))
5233 		return -EADDRNOTAVAIL;
5234 
5235 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5236 
5237 	/* store the MAC address in CAM */
5238 	return do_s2io_prog_unicast(dev, dev->dev_addr);
5239 }
5240 /**
5241  *  do_s2io_prog_unicast - Programs the Xframe mac address
5242  *  @dev : pointer to the device structure.
5243  *  @addr: a uchar pointer to the new mac address which is to be set.
5244  *  Description : This procedure will program the Xframe to receive
5245  *  frames with new Mac Address
5246  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5247  *  as defined in errno.h file on failure.
5248  */
5249 
5250 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5251 {
5252 	struct s2io_nic *sp = netdev_priv(dev);
5253 	register u64 mac_addr = 0, perm_addr = 0;
5254 	int i;
5255 	u64 tmp64;
5256 	struct config_param *config = &sp->config;
5257 
5258 	/*
5259 	 * Set the new MAC address as the new unicast filter and reflect this
5260 	 * change on the device address registered with the OS. It will be
5261 	 * at offset 0.
5262 	 */
5263 	for (i = 0; i < ETH_ALEN; i++) {
5264 		mac_addr <<= 8;
5265 		mac_addr |= addr[i];
5266 		perm_addr <<= 8;
5267 		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5268 	}
5269 
5270 	/* check if the dev_addr is different than perm_addr */
5271 	if (mac_addr == perm_addr)
5272 		return SUCCESS;
5273 
5274 	/* check if the mac already preset in CAM */
5275 	for (i = 1; i < config->max_mac_addr; i++) {
5276 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5277 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5278 			break;
5279 
5280 		if (tmp64 == mac_addr) {
5281 			DBG_PRINT(INFO_DBG,
5282 				  "MAC addr:0x%llx already present in CAM\n",
5283 				  (unsigned long long)mac_addr);
5284 			return SUCCESS;
5285 		}
5286 	}
5287 	if (i == config->max_mac_addr) {
5288 		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5289 		return FAILURE;
5290 	}
5291 	/* Update the internal structure with this new mac address */
5292 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5293 
5294 	return do_s2io_add_mac(sp, mac_addr, i);
5295 }
5296 
5297 /**
5298  * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5299  * @sp : private member of the device structure, which is a pointer to the
5300  * s2io_nic structure.
5301  * @cmd: pointer to the structure with parameters given by ethtool to set
5302  * link information.
5303  * Description:
5304  * The function sets different link parameters provided by the user onto
5305  * the NIC.
5306  * Return value:
5307  * 0 on success.
5308  */
5309 
5310 static int
5311 s2io_ethtool_set_link_ksettings(struct net_device *dev,
5312 				const struct ethtool_link_ksettings *cmd)
5313 {
5314 	struct s2io_nic *sp = netdev_priv(dev);
5315 	if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5316 	    (cmd->base.speed != SPEED_10000) ||
5317 	    (cmd->base.duplex != DUPLEX_FULL))
5318 		return -EINVAL;
5319 	else {
5320 		s2io_close(sp->dev);
5321 		s2io_open(sp->dev);
5322 	}
5323 
5324 	return 0;
5325 }
5326 
5327 /**
5328  * s2io_ethtol_get_link_ksettings - Return link specific information.
5329  * @sp : private member of the device structure, pointer to the
5330  *      s2io_nic structure.
5331  * @cmd : pointer to the structure with parameters given by ethtool
5332  * to return link information.
5333  * Description:
5334  * Returns link specific information like speed, duplex etc.. to ethtool.
5335  * Return value :
5336  * return 0 on success.
5337  */
5338 
5339 static int
5340 s2io_ethtool_get_link_ksettings(struct net_device *dev,
5341 				struct ethtool_link_ksettings *cmd)
5342 {
5343 	struct s2io_nic *sp = netdev_priv(dev);
5344 
5345 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
5346 	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5347 	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5348 
5349 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5350 	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5351 	ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5352 
5353 	cmd->base.port = PORT_FIBRE;
5354 
5355 	if (netif_carrier_ok(sp->dev)) {
5356 		cmd->base.speed = SPEED_10000;
5357 		cmd->base.duplex = DUPLEX_FULL;
5358 	} else {
5359 		cmd->base.speed = SPEED_UNKNOWN;
5360 		cmd->base.duplex = DUPLEX_UNKNOWN;
5361 	}
5362 
5363 	cmd->base.autoneg = AUTONEG_DISABLE;
5364 	return 0;
5365 }
5366 
5367 /**
5368  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5369  * @sp : private member of the device structure, which is a pointer to the
5370  * s2io_nic structure.
5371  * @info : pointer to the structure with parameters given by ethtool to
5372  * return driver information.
5373  * Description:
5374  * Returns driver specefic information like name, version etc.. to ethtool.
5375  * Return value:
5376  *  void
5377  */
5378 
5379 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5380 				  struct ethtool_drvinfo *info)
5381 {
5382 	struct s2io_nic *sp = netdev_priv(dev);
5383 
5384 	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5385 	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
5386 	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5387 }
5388 
5389 /**
5390  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5391  *  @sp: private member of the device structure, which is a pointer to the
5392  *  s2io_nic structure.
5393  *  @regs : pointer to the structure with parameters given by ethtool for
5394  *  dumping the registers.
5395  *  @reg_space: The input argument into which all the registers are dumped.
5396  *  Description:
5397  *  Dumps the entire register space of xFrame NIC into the user given
5398  *  buffer area.
5399  * Return value :
5400  * void .
5401  */
5402 
5403 static void s2io_ethtool_gregs(struct net_device *dev,
5404 			       struct ethtool_regs *regs, void *space)
5405 {
5406 	int i;
5407 	u64 reg;
5408 	u8 *reg_space = (u8 *)space;
5409 	struct s2io_nic *sp = netdev_priv(dev);
5410 
5411 	regs->len = XENA_REG_SPACE;
5412 	regs->version = sp->pdev->subsystem_device;
5413 
5414 	for (i = 0; i < regs->len; i += 8) {
5415 		reg = readq(sp->bar0 + i);
5416 		memcpy((reg_space + i), &reg, 8);
5417 	}
5418 }
5419 
5420 /*
5421  *  s2io_set_led - control NIC led
5422  */
5423 static void s2io_set_led(struct s2io_nic *sp, bool on)
5424 {
5425 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5426 	u16 subid = sp->pdev->subsystem_device;
5427 	u64 val64;
5428 
5429 	if ((sp->device_type == XFRAME_II_DEVICE) ||
5430 	    ((subid & 0xFF) >= 0x07)) {
5431 		val64 = readq(&bar0->gpio_control);
5432 		if (on)
5433 			val64 |= GPIO_CTRL_GPIO_0;
5434 		else
5435 			val64 &= ~GPIO_CTRL_GPIO_0;
5436 
5437 		writeq(val64, &bar0->gpio_control);
5438 	} else {
5439 		val64 = readq(&bar0->adapter_control);
5440 		if (on)
5441 			val64 |= ADAPTER_LED_ON;
5442 		else
5443 			val64 &= ~ADAPTER_LED_ON;
5444 
5445 		writeq(val64, &bar0->adapter_control);
5446 	}
5447 
5448 }
5449 
5450 /**
5451  * s2io_ethtool_set_led - To physically identify the nic on the system.
5452  * @dev : network device
5453  * @state: led setting
5454  *
5455  * Description: Used to physically identify the NIC on the system.
5456  * The Link LED will blink for a time specified by the user for
5457  * identification.
5458  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5459  * identification is possible only if it's link is up.
5460  */
5461 
5462 static int s2io_ethtool_set_led(struct net_device *dev,
5463 				enum ethtool_phys_id_state state)
5464 {
5465 	struct s2io_nic *sp = netdev_priv(dev);
5466 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5467 	u16 subid = sp->pdev->subsystem_device;
5468 
5469 	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5470 		u64 val64 = readq(&bar0->adapter_control);
5471 		if (!(val64 & ADAPTER_CNTL_EN)) {
5472 			pr_err("Adapter Link down, cannot blink LED\n");
5473 			return -EAGAIN;
5474 		}
5475 	}
5476 
5477 	switch (state) {
5478 	case ETHTOOL_ID_ACTIVE:
5479 		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5480 		return 1;	/* cycle on/off once per second */
5481 
5482 	case ETHTOOL_ID_ON:
5483 		s2io_set_led(sp, true);
5484 		break;
5485 
5486 	case ETHTOOL_ID_OFF:
5487 		s2io_set_led(sp, false);
5488 		break;
5489 
5490 	case ETHTOOL_ID_INACTIVE:
5491 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5492 			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5493 	}
5494 
5495 	return 0;
5496 }
5497 
5498 static void s2io_ethtool_gringparam(struct net_device *dev,
5499 				    struct ethtool_ringparam *ering)
5500 {
5501 	struct s2io_nic *sp = netdev_priv(dev);
5502 	int i, tx_desc_count = 0, rx_desc_count = 0;
5503 
5504 	if (sp->rxd_mode == RXD_MODE_1) {
5505 		ering->rx_max_pending = MAX_RX_DESC_1;
5506 		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5507 	} else {
5508 		ering->rx_max_pending = MAX_RX_DESC_2;
5509 		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5510 	}
5511 
5512 	ering->tx_max_pending = MAX_TX_DESC;
5513 
5514 	for (i = 0; i < sp->config.rx_ring_num; i++)
5515 		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5516 	ering->rx_pending = rx_desc_count;
5517 	ering->rx_jumbo_pending = rx_desc_count;
5518 
5519 	for (i = 0; i < sp->config.tx_fifo_num; i++)
5520 		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5521 	ering->tx_pending = tx_desc_count;
5522 	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5523 }
5524 
5525 /**
5526  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5527  * @sp : private member of the device structure, which is a pointer to the
5528  *	s2io_nic structure.
5529  * @ep : pointer to the structure with pause parameters given by ethtool.
5530  * Description:
5531  * Returns the Pause frame generation and reception capability of the NIC.
5532  * Return value:
5533  *  void
5534  */
5535 static void s2io_ethtool_getpause_data(struct net_device *dev,
5536 				       struct ethtool_pauseparam *ep)
5537 {
5538 	u64 val64;
5539 	struct s2io_nic *sp = netdev_priv(dev);
5540 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5541 
5542 	val64 = readq(&bar0->rmac_pause_cfg);
5543 	if (val64 & RMAC_PAUSE_GEN_ENABLE)
5544 		ep->tx_pause = true;
5545 	if (val64 & RMAC_PAUSE_RX_ENABLE)
5546 		ep->rx_pause = true;
5547 	ep->autoneg = false;
5548 }
5549 
5550 /**
5551  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5552  * @sp : private member of the device structure, which is a pointer to the
5553  *      s2io_nic structure.
5554  * @ep : pointer to the structure with pause parameters given by ethtool.
5555  * Description:
5556  * It can be used to set or reset Pause frame generation or reception
5557  * support of the NIC.
5558  * Return value:
5559  * int, returns 0 on Success
5560  */
5561 
5562 static int s2io_ethtool_setpause_data(struct net_device *dev,
5563 				      struct ethtool_pauseparam *ep)
5564 {
5565 	u64 val64;
5566 	struct s2io_nic *sp = netdev_priv(dev);
5567 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5568 
5569 	val64 = readq(&bar0->rmac_pause_cfg);
5570 	if (ep->tx_pause)
5571 		val64 |= RMAC_PAUSE_GEN_ENABLE;
5572 	else
5573 		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5574 	if (ep->rx_pause)
5575 		val64 |= RMAC_PAUSE_RX_ENABLE;
5576 	else
5577 		val64 &= ~RMAC_PAUSE_RX_ENABLE;
5578 	writeq(val64, &bar0->rmac_pause_cfg);
5579 	return 0;
5580 }
5581 
5582 /**
5583  * read_eeprom - reads 4 bytes of data from user given offset.
5584  * @sp : private member of the device structure, which is a pointer to the
5585  *      s2io_nic structure.
5586  * @off : offset at which the data must be written
5587  * @data : Its an output parameter where the data read at the given
5588  *	offset is stored.
5589  * Description:
5590  * Will read 4 bytes of data from the user given offset and return the
5591  * read data.
5592  * NOTE: Will allow to read only part of the EEPROM visible through the
5593  *   I2C bus.
5594  * Return value:
5595  *  -1 on failure and 0 on success.
5596  */
5597 
5598 #define S2IO_DEV_ID		5
5599 static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5600 {
5601 	int ret = -1;
5602 	u32 exit_cnt = 0;
5603 	u64 val64;
5604 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5605 
5606 	if (sp->device_type == XFRAME_I_DEVICE) {
5607 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5608 			I2C_CONTROL_ADDR(off) |
5609 			I2C_CONTROL_BYTE_CNT(0x3) |
5610 			I2C_CONTROL_READ |
5611 			I2C_CONTROL_CNTL_START;
5612 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5613 
5614 		while (exit_cnt < 5) {
5615 			val64 = readq(&bar0->i2c_control);
5616 			if (I2C_CONTROL_CNTL_END(val64)) {
5617 				*data = I2C_CONTROL_GET_DATA(val64);
5618 				ret = 0;
5619 				break;
5620 			}
5621 			msleep(50);
5622 			exit_cnt++;
5623 		}
5624 	}
5625 
5626 	if (sp->device_type == XFRAME_II_DEVICE) {
5627 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5628 			SPI_CONTROL_BYTECNT(0x3) |
5629 			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5630 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5631 		val64 |= SPI_CONTROL_REQ;
5632 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5633 		while (exit_cnt < 5) {
5634 			val64 = readq(&bar0->spi_control);
5635 			if (val64 & SPI_CONTROL_NACK) {
5636 				ret = 1;
5637 				break;
5638 			} else if (val64 & SPI_CONTROL_DONE) {
5639 				*data = readq(&bar0->spi_data);
5640 				*data &= 0xffffff;
5641 				ret = 0;
5642 				break;
5643 			}
5644 			msleep(50);
5645 			exit_cnt++;
5646 		}
5647 	}
5648 	return ret;
5649 }
5650 
5651 /**
5652  *  write_eeprom - actually writes the relevant part of the data value.
5653  *  @sp : private member of the device structure, which is a pointer to the
5654  *       s2io_nic structure.
5655  *  @off : offset at which the data must be written
5656  *  @data : The data that is to be written
5657  *  @cnt : Number of bytes of the data that are actually to be written into
5658  *  the Eeprom. (max of 3)
5659  * Description:
5660  *  Actually writes the relevant part of the data value into the Eeprom
5661  *  through the I2C bus.
5662  * Return value:
5663  *  0 on success, -1 on failure.
5664  */
5665 
5666 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5667 {
5668 	int exit_cnt = 0, ret = -1;
5669 	u64 val64;
5670 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5671 
5672 	if (sp->device_type == XFRAME_I_DEVICE) {
5673 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5674 			I2C_CONTROL_ADDR(off) |
5675 			I2C_CONTROL_BYTE_CNT(cnt) |
5676 			I2C_CONTROL_SET_DATA((u32)data) |
5677 			I2C_CONTROL_CNTL_START;
5678 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5679 
5680 		while (exit_cnt < 5) {
5681 			val64 = readq(&bar0->i2c_control);
5682 			if (I2C_CONTROL_CNTL_END(val64)) {
5683 				if (!(val64 & I2C_CONTROL_NACK))
5684 					ret = 0;
5685 				break;
5686 			}
5687 			msleep(50);
5688 			exit_cnt++;
5689 		}
5690 	}
5691 
5692 	if (sp->device_type == XFRAME_II_DEVICE) {
5693 		int write_cnt = (cnt == 8) ? 0 : cnt;
5694 		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5695 
5696 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5697 			SPI_CONTROL_BYTECNT(write_cnt) |
5698 			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5699 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5700 		val64 |= SPI_CONTROL_REQ;
5701 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5702 		while (exit_cnt < 5) {
5703 			val64 = readq(&bar0->spi_control);
5704 			if (val64 & SPI_CONTROL_NACK) {
5705 				ret = 1;
5706 				break;
5707 			} else if (val64 & SPI_CONTROL_DONE) {
5708 				ret = 0;
5709 				break;
5710 			}
5711 			msleep(50);
5712 			exit_cnt++;
5713 		}
5714 	}
5715 	return ret;
5716 }
5717 static void s2io_vpd_read(struct s2io_nic *nic)
5718 {
5719 	u8 *vpd_data;
5720 	u8 data;
5721 	int i = 0, cnt, len, fail = 0;
5722 	int vpd_addr = 0x80;
5723 	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5724 
5725 	if (nic->device_type == XFRAME_II_DEVICE) {
5726 		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5727 		vpd_addr = 0x80;
5728 	} else {
5729 		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5730 		vpd_addr = 0x50;
5731 	}
5732 	strcpy(nic->serial_num, "NOT AVAILABLE");
5733 
5734 	vpd_data = kmalloc(256, GFP_KERNEL);
5735 	if (!vpd_data) {
5736 		swstats->mem_alloc_fail_cnt++;
5737 		return;
5738 	}
5739 	swstats->mem_allocated += 256;
5740 
5741 	for (i = 0; i < 256; i += 4) {
5742 		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5743 		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5744 		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5745 		for (cnt = 0; cnt < 5; cnt++) {
5746 			msleep(2);
5747 			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5748 			if (data == 0x80)
5749 				break;
5750 		}
5751 		if (cnt >= 5) {
5752 			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5753 			fail = 1;
5754 			break;
5755 		}
5756 		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5757 				      (u32 *)&vpd_data[i]);
5758 	}
5759 
5760 	if (!fail) {
5761 		/* read serial number of adapter */
5762 		for (cnt = 0; cnt < 252; cnt++) {
5763 			if ((vpd_data[cnt] == 'S') &&
5764 			    (vpd_data[cnt+1] == 'N')) {
5765 				len = vpd_data[cnt+2];
5766 				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5767 					memcpy(nic->serial_num,
5768 					       &vpd_data[cnt + 3],
5769 					       len);
5770 					memset(nic->serial_num+len,
5771 					       0,
5772 					       VPD_STRING_LEN-len);
5773 					break;
5774 				}
5775 			}
5776 		}
5777 	}
5778 
5779 	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5780 		len = vpd_data[1];
5781 		memcpy(nic->product_name, &vpd_data[3], len);
5782 		nic->product_name[len] = 0;
5783 	}
5784 	kfree(vpd_data);
5785 	swstats->mem_freed += 256;
5786 }
5787 
5788 /**
5789  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5790  *  @sp : private member of the device structure, which is a pointer to the
5791  *  s2io_nic structure.
5792  *  @eeprom : pointer to the user level structure provided by ethtool,
5793  *  containing all relevant information.
5794  *  @data_buf : user defined value to be written into Eeprom.
5795  *  Description: Reads the values stored in the Eeprom at given offset
5796  *  for a given length. Stores these values int the input argument data
5797  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5798  *  Return value:
5799  *  int  0 on success
5800  */
5801 
5802 static int s2io_ethtool_geeprom(struct net_device *dev,
5803 				struct ethtool_eeprom *eeprom, u8 * data_buf)
5804 {
5805 	u32 i, valid;
5806 	u64 data;
5807 	struct s2io_nic *sp = netdev_priv(dev);
5808 
5809 	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5810 
5811 	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5812 		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5813 
5814 	for (i = 0; i < eeprom->len; i += 4) {
5815 		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5816 			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5817 			return -EFAULT;
5818 		}
5819 		valid = INV(data);
5820 		memcpy((data_buf + i), &valid, 4);
5821 	}
5822 	return 0;
5823 }
5824 
5825 /**
5826  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5827  *  @sp : private member of the device structure, which is a pointer to the
5828  *  s2io_nic structure.
5829  *  @eeprom : pointer to the user level structure provided by ethtool,
5830  *  containing all relevant information.
5831  *  @data_buf ; user defined value to be written into Eeprom.
5832  *  Description:
5833  *  Tries to write the user provided value in the Eeprom, at the offset
5834  *  given by the user.
5835  *  Return value:
5836  *  0 on success, -EFAULT on failure.
5837  */
5838 
5839 static int s2io_ethtool_seeprom(struct net_device *dev,
5840 				struct ethtool_eeprom *eeprom,
5841 				u8 *data_buf)
5842 {
5843 	int len = eeprom->len, cnt = 0;
5844 	u64 valid = 0, data;
5845 	struct s2io_nic *sp = netdev_priv(dev);
5846 
5847 	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5848 		DBG_PRINT(ERR_DBG,
5849 			  "ETHTOOL_WRITE_EEPROM Err: "
5850 			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
5851 			  (sp->pdev->vendor | (sp->pdev->device << 16)),
5852 			  eeprom->magic);
5853 		return -EFAULT;
5854 	}
5855 
5856 	while (len) {
5857 		data = (u32)data_buf[cnt] & 0x000000FF;
5858 		if (data)
5859 			valid = (u32)(data << 24);
5860 		else
5861 			valid = data;
5862 
5863 		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5864 			DBG_PRINT(ERR_DBG,
5865 				  "ETHTOOL_WRITE_EEPROM Err: "
5866 				  "Cannot write into the specified offset\n");
5867 			return -EFAULT;
5868 		}
5869 		cnt++;
5870 		len--;
5871 	}
5872 
5873 	return 0;
5874 }
5875 
5876 /**
5877  * s2io_register_test - reads and writes into all clock domains.
5878  * @sp : private member of the device structure, which is a pointer to the
5879  * s2io_nic structure.
5880  * @data : variable that returns the result of each of the test conducted b
5881  * by the driver.
5882  * Description:
5883  * Read and write into all clock domains. The NIC has 3 clock domains,
5884  * see that registers in all the three regions are accessible.
5885  * Return value:
5886  * 0 on success.
5887  */
5888 
5889 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5890 {
5891 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5892 	u64 val64 = 0, exp_val;
5893 	int fail = 0;
5894 
5895 	val64 = readq(&bar0->pif_rd_swapper_fb);
5896 	if (val64 != 0x123456789abcdefULL) {
5897 		fail = 1;
5898 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5899 	}
5900 
5901 	val64 = readq(&bar0->rmac_pause_cfg);
5902 	if (val64 != 0xc000ffff00000000ULL) {
5903 		fail = 1;
5904 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5905 	}
5906 
5907 	val64 = readq(&bar0->rx_queue_cfg);
5908 	if (sp->device_type == XFRAME_II_DEVICE)
5909 		exp_val = 0x0404040404040404ULL;
5910 	else
5911 		exp_val = 0x0808080808080808ULL;
5912 	if (val64 != exp_val) {
5913 		fail = 1;
5914 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5915 	}
5916 
5917 	val64 = readq(&bar0->xgxs_efifo_cfg);
5918 	if (val64 != 0x000000001923141EULL) {
5919 		fail = 1;
5920 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5921 	}
5922 
5923 	val64 = 0x5A5A5A5A5A5A5A5AULL;
5924 	writeq(val64, &bar0->xmsi_data);
5925 	val64 = readq(&bar0->xmsi_data);
5926 	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5927 		fail = 1;
5928 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5929 	}
5930 
5931 	val64 = 0xA5A5A5A5A5A5A5A5ULL;
5932 	writeq(val64, &bar0->xmsi_data);
5933 	val64 = readq(&bar0->xmsi_data);
5934 	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5935 		fail = 1;
5936 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5937 	}
5938 
5939 	*data = fail;
5940 	return fail;
5941 }
5942 
5943 /**
5944  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5945  * @sp : private member of the device structure, which is a pointer to the
5946  * s2io_nic structure.
5947  * @data:variable that returns the result of each of the test conducted by
5948  * the driver.
5949  * Description:
5950  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5951  * register.
5952  * Return value:
5953  * 0 on success.
5954  */
5955 
5956 static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5957 {
5958 	int fail = 0;
5959 	u64 ret_data, org_4F0, org_7F0;
5960 	u8 saved_4F0 = 0, saved_7F0 = 0;
5961 	struct net_device *dev = sp->dev;
5962 
5963 	/* Test Write Error at offset 0 */
5964 	/* Note that SPI interface allows write access to all areas
5965 	 * of EEPROM. Hence doing all negative testing only for Xframe I.
5966 	 */
5967 	if (sp->device_type == XFRAME_I_DEVICE)
5968 		if (!write_eeprom(sp, 0, 0, 3))
5969 			fail = 1;
5970 
5971 	/* Save current values at offsets 0x4F0 and 0x7F0 */
5972 	if (!read_eeprom(sp, 0x4F0, &org_4F0))
5973 		saved_4F0 = 1;
5974 	if (!read_eeprom(sp, 0x7F0, &org_7F0))
5975 		saved_7F0 = 1;
5976 
5977 	/* Test Write at offset 4f0 */
5978 	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5979 		fail = 1;
5980 	if (read_eeprom(sp, 0x4F0, &ret_data))
5981 		fail = 1;
5982 
5983 	if (ret_data != 0x012345) {
5984 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5985 			  "Data written %llx Data read %llx\n",
5986 			  dev->name, (unsigned long long)0x12345,
5987 			  (unsigned long long)ret_data);
5988 		fail = 1;
5989 	}
5990 
5991 	/* Reset the EEPROM data go FFFF */
5992 	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5993 
5994 	/* Test Write Request Error at offset 0x7c */
5995 	if (sp->device_type == XFRAME_I_DEVICE)
5996 		if (!write_eeprom(sp, 0x07C, 0, 3))
5997 			fail = 1;
5998 
5999 	/* Test Write Request at offset 0x7f0 */
6000 	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6001 		fail = 1;
6002 	if (read_eeprom(sp, 0x7F0, &ret_data))
6003 		fail = 1;
6004 
6005 	if (ret_data != 0x012345) {
6006 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6007 			  "Data written %llx Data read %llx\n",
6008 			  dev->name, (unsigned long long)0x12345,
6009 			  (unsigned long long)ret_data);
6010 		fail = 1;
6011 	}
6012 
6013 	/* Reset the EEPROM data go FFFF */
6014 	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6015 
6016 	if (sp->device_type == XFRAME_I_DEVICE) {
6017 		/* Test Write Error at offset 0x80 */
6018 		if (!write_eeprom(sp, 0x080, 0, 3))
6019 			fail = 1;
6020 
6021 		/* Test Write Error at offset 0xfc */
6022 		if (!write_eeprom(sp, 0x0FC, 0, 3))
6023 			fail = 1;
6024 
6025 		/* Test Write Error at offset 0x100 */
6026 		if (!write_eeprom(sp, 0x100, 0, 3))
6027 			fail = 1;
6028 
6029 		/* Test Write Error at offset 4ec */
6030 		if (!write_eeprom(sp, 0x4EC, 0, 3))
6031 			fail = 1;
6032 	}
6033 
6034 	/* Restore values at offsets 0x4F0 and 0x7F0 */
6035 	if (saved_4F0)
6036 		write_eeprom(sp, 0x4F0, org_4F0, 3);
6037 	if (saved_7F0)
6038 		write_eeprom(sp, 0x7F0, org_7F0, 3);
6039 
6040 	*data = fail;
6041 	return fail;
6042 }
6043 
6044 /**
6045  * s2io_bist_test - invokes the MemBist test of the card .
6046  * @sp : private member of the device structure, which is a pointer to the
6047  * s2io_nic structure.
6048  * @data:variable that returns the result of each of the test conducted by
6049  * the driver.
6050  * Description:
6051  * This invokes the MemBist test of the card. We give around
6052  * 2 secs time for the Test to complete. If it's still not complete
6053  * within this peiod, we consider that the test failed.
6054  * Return value:
6055  * 0 on success and -1 on failure.
6056  */
6057 
6058 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6059 {
6060 	u8 bist = 0;
6061 	int cnt = 0, ret = -1;
6062 
6063 	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6064 	bist |= PCI_BIST_START;
6065 	pci_write_config_word(sp->pdev, PCI_BIST, bist);
6066 
6067 	while (cnt < 20) {
6068 		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6069 		if (!(bist & PCI_BIST_START)) {
6070 			*data = (bist & PCI_BIST_CODE_MASK);
6071 			ret = 0;
6072 			break;
6073 		}
6074 		msleep(100);
6075 		cnt++;
6076 	}
6077 
6078 	return ret;
6079 }
6080 
6081 /**
6082  * s2io_link_test - verifies the link state of the nic
6083  * @sp ; private member of the device structure, which is a pointer to the
6084  * s2io_nic structure.
6085  * @data: variable that returns the result of each of the test conducted by
6086  * the driver.
6087  * Description:
6088  * The function verifies the link state of the NIC and updates the input
6089  * argument 'data' appropriately.
6090  * Return value:
6091  * 0 on success.
6092  */
6093 
6094 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6095 {
6096 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6097 	u64 val64;
6098 
6099 	val64 = readq(&bar0->adapter_status);
6100 	if (!(LINK_IS_UP(val64)))
6101 		*data = 1;
6102 	else
6103 		*data = 0;
6104 
6105 	return *data;
6106 }
6107 
6108 /**
6109  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6110  * @sp: private member of the device structure, which is a pointer to the
6111  * s2io_nic structure.
6112  * @data: variable that returns the result of each of the test
6113  * conducted by the driver.
6114  * Description:
6115  *  This is one of the offline test that tests the read and write
6116  *  access to the RldRam chip on the NIC.
6117  * Return value:
6118  *  0 on success.
6119  */
6120 
6121 static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6122 {
6123 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6124 	u64 val64;
6125 	int cnt, iteration = 0, test_fail = 0;
6126 
6127 	val64 = readq(&bar0->adapter_control);
6128 	val64 &= ~ADAPTER_ECC_EN;
6129 	writeq(val64, &bar0->adapter_control);
6130 
6131 	val64 = readq(&bar0->mc_rldram_test_ctrl);
6132 	val64 |= MC_RLDRAM_TEST_MODE;
6133 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6134 
6135 	val64 = readq(&bar0->mc_rldram_mrs);
6136 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6137 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6138 
6139 	val64 |= MC_RLDRAM_MRS_ENABLE;
6140 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6141 
6142 	while (iteration < 2) {
6143 		val64 = 0x55555555aaaa0000ULL;
6144 		if (iteration == 1)
6145 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6146 		writeq(val64, &bar0->mc_rldram_test_d0);
6147 
6148 		val64 = 0xaaaa5a5555550000ULL;
6149 		if (iteration == 1)
6150 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6151 		writeq(val64, &bar0->mc_rldram_test_d1);
6152 
6153 		val64 = 0x55aaaaaaaa5a0000ULL;
6154 		if (iteration == 1)
6155 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6156 		writeq(val64, &bar0->mc_rldram_test_d2);
6157 
6158 		val64 = (u64) (0x0000003ffffe0100ULL);
6159 		writeq(val64, &bar0->mc_rldram_test_add);
6160 
6161 		val64 = MC_RLDRAM_TEST_MODE |
6162 			MC_RLDRAM_TEST_WRITE |
6163 			MC_RLDRAM_TEST_GO;
6164 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6165 
6166 		for (cnt = 0; cnt < 5; cnt++) {
6167 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6168 			if (val64 & MC_RLDRAM_TEST_DONE)
6169 				break;
6170 			msleep(200);
6171 		}
6172 
6173 		if (cnt == 5)
6174 			break;
6175 
6176 		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6177 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6178 
6179 		for (cnt = 0; cnt < 5; cnt++) {
6180 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6181 			if (val64 & MC_RLDRAM_TEST_DONE)
6182 				break;
6183 			msleep(500);
6184 		}
6185 
6186 		if (cnt == 5)
6187 			break;
6188 
6189 		val64 = readq(&bar0->mc_rldram_test_ctrl);
6190 		if (!(val64 & MC_RLDRAM_TEST_PASS))
6191 			test_fail = 1;
6192 
6193 		iteration++;
6194 	}
6195 
6196 	*data = test_fail;
6197 
6198 	/* Bring the adapter out of test mode */
6199 	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6200 
6201 	return test_fail;
6202 }
6203 
6204 /**
6205  *  s2io_ethtool_test - conducts 6 tsets to determine the health of card.
6206  *  @sp : private member of the device structure, which is a pointer to the
6207  *  s2io_nic structure.
6208  *  @ethtest : pointer to a ethtool command specific structure that will be
6209  *  returned to the user.
6210  *  @data : variable that returns the result of each of the test
6211  * conducted by the driver.
6212  * Description:
6213  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6214  *  the health of the card.
6215  * Return value:
6216  *  void
6217  */
6218 
6219 static void s2io_ethtool_test(struct net_device *dev,
6220 			      struct ethtool_test *ethtest,
6221 			      uint64_t *data)
6222 {
6223 	struct s2io_nic *sp = netdev_priv(dev);
6224 	int orig_state = netif_running(sp->dev);
6225 
6226 	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6227 		/* Offline Tests. */
6228 		if (orig_state)
6229 			s2io_close(sp->dev);
6230 
6231 		if (s2io_register_test(sp, &data[0]))
6232 			ethtest->flags |= ETH_TEST_FL_FAILED;
6233 
6234 		s2io_reset(sp);
6235 
6236 		if (s2io_rldram_test(sp, &data[3]))
6237 			ethtest->flags |= ETH_TEST_FL_FAILED;
6238 
6239 		s2io_reset(sp);
6240 
6241 		if (s2io_eeprom_test(sp, &data[1]))
6242 			ethtest->flags |= ETH_TEST_FL_FAILED;
6243 
6244 		if (s2io_bist_test(sp, &data[4]))
6245 			ethtest->flags |= ETH_TEST_FL_FAILED;
6246 
6247 		if (orig_state)
6248 			s2io_open(sp->dev);
6249 
6250 		data[2] = 0;
6251 	} else {
6252 		/* Online Tests. */
6253 		if (!orig_state) {
6254 			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6255 				  dev->name);
6256 			data[0] = -1;
6257 			data[1] = -1;
6258 			data[2] = -1;
6259 			data[3] = -1;
6260 			data[4] = -1;
6261 		}
6262 
6263 		if (s2io_link_test(sp, &data[2]))
6264 			ethtest->flags |= ETH_TEST_FL_FAILED;
6265 
6266 		data[0] = 0;
6267 		data[1] = 0;
6268 		data[3] = 0;
6269 		data[4] = 0;
6270 	}
6271 }
6272 
6273 static void s2io_get_ethtool_stats(struct net_device *dev,
6274 				   struct ethtool_stats *estats,
6275 				   u64 *tmp_stats)
6276 {
6277 	int i = 0, k;
6278 	struct s2io_nic *sp = netdev_priv(dev);
6279 	struct stat_block *stats = sp->mac_control.stats_info;
6280 	struct swStat *swstats = &stats->sw_stat;
6281 	struct xpakStat *xstats = &stats->xpak_stat;
6282 
6283 	s2io_updt_stats(sp);
6284 	tmp_stats[i++] =
6285 		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
6286 		le32_to_cpu(stats->tmac_frms);
6287 	tmp_stats[i++] =
6288 		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6289 		le32_to_cpu(stats->tmac_data_octets);
6290 	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6291 	tmp_stats[i++] =
6292 		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6293 		le32_to_cpu(stats->tmac_mcst_frms);
6294 	tmp_stats[i++] =
6295 		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6296 		le32_to_cpu(stats->tmac_bcst_frms);
6297 	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6298 	tmp_stats[i++] =
6299 		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6300 		le32_to_cpu(stats->tmac_ttl_octets);
6301 	tmp_stats[i++] =
6302 		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6303 		le32_to_cpu(stats->tmac_ucst_frms);
6304 	tmp_stats[i++] =
6305 		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6306 		le32_to_cpu(stats->tmac_nucst_frms);
6307 	tmp_stats[i++] =
6308 		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6309 		le32_to_cpu(stats->tmac_any_err_frms);
6310 	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6311 	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6312 	tmp_stats[i++] =
6313 		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6314 		le32_to_cpu(stats->tmac_vld_ip);
6315 	tmp_stats[i++] =
6316 		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6317 		le32_to_cpu(stats->tmac_drop_ip);
6318 	tmp_stats[i++] =
6319 		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6320 		le32_to_cpu(stats->tmac_icmp);
6321 	tmp_stats[i++] =
6322 		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6323 		le32_to_cpu(stats->tmac_rst_tcp);
6324 	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6325 	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6326 		le32_to_cpu(stats->tmac_udp);
6327 	tmp_stats[i++] =
6328 		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6329 		le32_to_cpu(stats->rmac_vld_frms);
6330 	tmp_stats[i++] =
6331 		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6332 		le32_to_cpu(stats->rmac_data_octets);
6333 	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6334 	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6335 	tmp_stats[i++] =
6336 		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6337 		le32_to_cpu(stats->rmac_vld_mcst_frms);
6338 	tmp_stats[i++] =
6339 		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6340 		le32_to_cpu(stats->rmac_vld_bcst_frms);
6341 	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6342 	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6343 	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6344 	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6345 	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6346 	tmp_stats[i++] =
6347 		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6348 		le32_to_cpu(stats->rmac_ttl_octets);
6349 	tmp_stats[i++] =
6350 		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6351 		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
6352 	tmp_stats[i++] =
6353 		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6354 		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6355 	tmp_stats[i++] =
6356 		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6357 		le32_to_cpu(stats->rmac_discarded_frms);
6358 	tmp_stats[i++] =
6359 		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6360 		<< 32 | le32_to_cpu(stats->rmac_drop_events);
6361 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6362 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6363 	tmp_stats[i++] =
6364 		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6365 		le32_to_cpu(stats->rmac_usized_frms);
6366 	tmp_stats[i++] =
6367 		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6368 		le32_to_cpu(stats->rmac_osized_frms);
6369 	tmp_stats[i++] =
6370 		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6371 		le32_to_cpu(stats->rmac_frag_frms);
6372 	tmp_stats[i++] =
6373 		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6374 		le32_to_cpu(stats->rmac_jabber_frms);
6375 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6376 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6377 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6378 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6379 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6380 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6381 	tmp_stats[i++] =
6382 		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6383 		le32_to_cpu(stats->rmac_ip);
6384 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6385 	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6386 	tmp_stats[i++] =
6387 		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6388 		le32_to_cpu(stats->rmac_drop_ip);
6389 	tmp_stats[i++] =
6390 		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6391 		le32_to_cpu(stats->rmac_icmp);
6392 	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6393 	tmp_stats[i++] =
6394 		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6395 		le32_to_cpu(stats->rmac_udp);
6396 	tmp_stats[i++] =
6397 		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6398 		le32_to_cpu(stats->rmac_err_drp_udp);
6399 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6400 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6401 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6402 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6403 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6404 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6405 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6406 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6407 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6408 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6409 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6410 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6411 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6412 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6413 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6414 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6415 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6416 	tmp_stats[i++] =
6417 		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6418 		le32_to_cpu(stats->rmac_pause_cnt);
6419 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6420 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6421 	tmp_stats[i++] =
6422 		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6423 		le32_to_cpu(stats->rmac_accepted_ip);
6424 	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6425 	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6426 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6427 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6428 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6429 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6430 	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6431 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6432 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6433 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6434 	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6435 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6436 	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6437 	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6438 	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6439 	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6440 	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6441 	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6442 	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6443 
6444 	/* Enhanced statistics exist only for Hercules */
6445 	if (sp->device_type == XFRAME_II_DEVICE) {
6446 		tmp_stats[i++] =
6447 			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6448 		tmp_stats[i++] =
6449 			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6450 		tmp_stats[i++] =
6451 			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6452 		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6453 		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6454 		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6455 		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6456 		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6457 		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6458 		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6459 		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6460 		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6461 		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6462 		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6463 		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6464 		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6465 	}
6466 
6467 	tmp_stats[i++] = 0;
6468 	tmp_stats[i++] = swstats->single_ecc_errs;
6469 	tmp_stats[i++] = swstats->double_ecc_errs;
6470 	tmp_stats[i++] = swstats->parity_err_cnt;
6471 	tmp_stats[i++] = swstats->serious_err_cnt;
6472 	tmp_stats[i++] = swstats->soft_reset_cnt;
6473 	tmp_stats[i++] = swstats->fifo_full_cnt;
6474 	for (k = 0; k < MAX_RX_RINGS; k++)
6475 		tmp_stats[i++] = swstats->ring_full_cnt[k];
6476 	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6477 	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6478 	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6479 	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6480 	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6481 	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6482 	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6483 	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6484 	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6485 	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6486 	tmp_stats[i++] = xstats->warn_laser_output_power_high;
6487 	tmp_stats[i++] = xstats->warn_laser_output_power_low;
6488 	tmp_stats[i++] = swstats->clubbed_frms_cnt;
6489 	tmp_stats[i++] = swstats->sending_both;
6490 	tmp_stats[i++] = swstats->outof_sequence_pkts;
6491 	tmp_stats[i++] = swstats->flush_max_pkts;
6492 	if (swstats->num_aggregations) {
6493 		u64 tmp = swstats->sum_avg_pkts_aggregated;
6494 		int count = 0;
6495 		/*
6496 		 * Since 64-bit divide does not work on all platforms,
6497 		 * do repeated subtraction.
6498 		 */
6499 		while (tmp >= swstats->num_aggregations) {
6500 			tmp -= swstats->num_aggregations;
6501 			count++;
6502 		}
6503 		tmp_stats[i++] = count;
6504 	} else
6505 		tmp_stats[i++] = 0;
6506 	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6507 	tmp_stats[i++] = swstats->pci_map_fail_cnt;
6508 	tmp_stats[i++] = swstats->watchdog_timer_cnt;
6509 	tmp_stats[i++] = swstats->mem_allocated;
6510 	tmp_stats[i++] = swstats->mem_freed;
6511 	tmp_stats[i++] = swstats->link_up_cnt;
6512 	tmp_stats[i++] = swstats->link_down_cnt;
6513 	tmp_stats[i++] = swstats->link_up_time;
6514 	tmp_stats[i++] = swstats->link_down_time;
6515 
6516 	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6517 	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6518 	tmp_stats[i++] = swstats->tx_parity_err_cnt;
6519 	tmp_stats[i++] = swstats->tx_link_loss_cnt;
6520 	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6521 
6522 	tmp_stats[i++] = swstats->rx_parity_err_cnt;
6523 	tmp_stats[i++] = swstats->rx_abort_cnt;
6524 	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6525 	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6526 	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6527 	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6528 	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6529 	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6530 	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6531 	tmp_stats[i++] = swstats->tda_err_cnt;
6532 	tmp_stats[i++] = swstats->pfc_err_cnt;
6533 	tmp_stats[i++] = swstats->pcc_err_cnt;
6534 	tmp_stats[i++] = swstats->tti_err_cnt;
6535 	tmp_stats[i++] = swstats->tpa_err_cnt;
6536 	tmp_stats[i++] = swstats->sm_err_cnt;
6537 	tmp_stats[i++] = swstats->lso_err_cnt;
6538 	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6539 	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6540 	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6541 	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6542 	tmp_stats[i++] = swstats->rc_err_cnt;
6543 	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6544 	tmp_stats[i++] = swstats->rpa_err_cnt;
6545 	tmp_stats[i++] = swstats->rda_err_cnt;
6546 	tmp_stats[i++] = swstats->rti_err_cnt;
6547 	tmp_stats[i++] = swstats->mc_err_cnt;
6548 }
6549 
6550 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6551 {
6552 	return XENA_REG_SPACE;
6553 }
6554 
6555 
6556 static int s2io_get_eeprom_len(struct net_device *dev)
6557 {
6558 	return XENA_EEPROM_SPACE;
6559 }
6560 
6561 static int s2io_get_sset_count(struct net_device *dev, int sset)
6562 {
6563 	struct s2io_nic *sp = netdev_priv(dev);
6564 
6565 	switch (sset) {
6566 	case ETH_SS_TEST:
6567 		return S2IO_TEST_LEN;
6568 	case ETH_SS_STATS:
6569 		switch (sp->device_type) {
6570 		case XFRAME_I_DEVICE:
6571 			return XFRAME_I_STAT_LEN;
6572 		case XFRAME_II_DEVICE:
6573 			return XFRAME_II_STAT_LEN;
6574 		default:
6575 			return 0;
6576 		}
6577 	default:
6578 		return -EOPNOTSUPP;
6579 	}
6580 }
6581 
6582 static void s2io_ethtool_get_strings(struct net_device *dev,
6583 				     u32 stringset, u8 *data)
6584 {
6585 	int stat_size = 0;
6586 	struct s2io_nic *sp = netdev_priv(dev);
6587 
6588 	switch (stringset) {
6589 	case ETH_SS_TEST:
6590 		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6591 		break;
6592 	case ETH_SS_STATS:
6593 		stat_size = sizeof(ethtool_xena_stats_keys);
6594 		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6595 		if (sp->device_type == XFRAME_II_DEVICE) {
6596 			memcpy(data + stat_size,
6597 			       &ethtool_enhanced_stats_keys,
6598 			       sizeof(ethtool_enhanced_stats_keys));
6599 			stat_size += sizeof(ethtool_enhanced_stats_keys);
6600 		}
6601 
6602 		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6603 		       sizeof(ethtool_driver_stats_keys));
6604 	}
6605 }
6606 
6607 static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6608 {
6609 	struct s2io_nic *sp = netdev_priv(dev);
6610 	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6611 
6612 	if (changed && netif_running(dev)) {
6613 		int rc;
6614 
6615 		s2io_stop_all_tx_queue(sp);
6616 		s2io_card_down(sp);
6617 		dev->features = features;
6618 		rc = s2io_card_up(sp);
6619 		if (rc)
6620 			s2io_reset(sp);
6621 		else
6622 			s2io_start_all_tx_queue(sp);
6623 
6624 		return rc ? rc : 1;
6625 	}
6626 
6627 	return 0;
6628 }
6629 
6630 static const struct ethtool_ops netdev_ethtool_ops = {
6631 	.get_drvinfo = s2io_ethtool_gdrvinfo,
6632 	.get_regs_len = s2io_ethtool_get_regs_len,
6633 	.get_regs = s2io_ethtool_gregs,
6634 	.get_link = ethtool_op_get_link,
6635 	.get_eeprom_len = s2io_get_eeprom_len,
6636 	.get_eeprom = s2io_ethtool_geeprom,
6637 	.set_eeprom = s2io_ethtool_seeprom,
6638 	.get_ringparam = s2io_ethtool_gringparam,
6639 	.get_pauseparam = s2io_ethtool_getpause_data,
6640 	.set_pauseparam = s2io_ethtool_setpause_data,
6641 	.self_test = s2io_ethtool_test,
6642 	.get_strings = s2io_ethtool_get_strings,
6643 	.set_phys_id = s2io_ethtool_set_led,
6644 	.get_ethtool_stats = s2io_get_ethtool_stats,
6645 	.get_sset_count = s2io_get_sset_count,
6646 	.get_link_ksettings = s2io_ethtool_get_link_ksettings,
6647 	.set_link_ksettings = s2io_ethtool_set_link_ksettings,
6648 };
6649 
6650 /**
6651  *  s2io_ioctl - Entry point for the Ioctl
6652  *  @dev :  Device pointer.
6653  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6654  *  a proprietary structure used to pass information to the driver.
6655  *  @cmd :  This is used to distinguish between the different commands that
6656  *  can be passed to the IOCTL functions.
6657  *  Description:
6658  *  Currently there are no special functionality supported in IOCTL, hence
6659  *  function always return EOPNOTSUPPORTED
6660  */
6661 
6662 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6663 {
6664 	return -EOPNOTSUPP;
6665 }
6666 
6667 /**
6668  *  s2io_change_mtu - entry point to change MTU size for the device.
6669  *   @dev : device pointer.
6670  *   @new_mtu : the new MTU size for the device.
6671  *   Description: A driver entry point to change MTU size for the device.
6672  *   Before changing the MTU the device must be stopped.
6673  *  Return value:
6674  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6675  *   file on failure.
6676  */
6677 
6678 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6679 {
6680 	struct s2io_nic *sp = netdev_priv(dev);
6681 	int ret = 0;
6682 
6683 	dev->mtu = new_mtu;
6684 	if (netif_running(dev)) {
6685 		s2io_stop_all_tx_queue(sp);
6686 		s2io_card_down(sp);
6687 		ret = s2io_card_up(sp);
6688 		if (ret) {
6689 			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6690 				  __func__);
6691 			return ret;
6692 		}
6693 		s2io_wake_all_tx_queue(sp);
6694 	} else { /* Device is down */
6695 		struct XENA_dev_config __iomem *bar0 = sp->bar0;
6696 		u64 val64 = new_mtu;
6697 
6698 		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6699 	}
6700 
6701 	return ret;
6702 }
6703 
6704 /**
6705  * s2io_set_link - Set the LInk status
6706  * @data: long pointer to device private structue
6707  * Description: Sets the link status for the adapter
6708  */
6709 
6710 static void s2io_set_link(struct work_struct *work)
6711 {
6712 	struct s2io_nic *nic = container_of(work, struct s2io_nic,
6713 					    set_link_task);
6714 	struct net_device *dev = nic->dev;
6715 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
6716 	register u64 val64;
6717 	u16 subid;
6718 
6719 	rtnl_lock();
6720 
6721 	if (!netif_running(dev))
6722 		goto out_unlock;
6723 
6724 	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6725 		/* The card is being reset, no point doing anything */
6726 		goto out_unlock;
6727 	}
6728 
6729 	subid = nic->pdev->subsystem_device;
6730 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6731 		/*
6732 		 * Allow a small delay for the NICs self initiated
6733 		 * cleanup to complete.
6734 		 */
6735 		msleep(100);
6736 	}
6737 
6738 	val64 = readq(&bar0->adapter_status);
6739 	if (LINK_IS_UP(val64)) {
6740 		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6741 			if (verify_xena_quiescence(nic)) {
6742 				val64 = readq(&bar0->adapter_control);
6743 				val64 |= ADAPTER_CNTL_EN;
6744 				writeq(val64, &bar0->adapter_control);
6745 				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6746 					    nic->device_type, subid)) {
6747 					val64 = readq(&bar0->gpio_control);
6748 					val64 |= GPIO_CTRL_GPIO_0;
6749 					writeq(val64, &bar0->gpio_control);
6750 					val64 = readq(&bar0->gpio_control);
6751 				} else {
6752 					val64 |= ADAPTER_LED_ON;
6753 					writeq(val64, &bar0->adapter_control);
6754 				}
6755 				nic->device_enabled_once = true;
6756 			} else {
6757 				DBG_PRINT(ERR_DBG,
6758 					  "%s: Error: device is not Quiescent\n",
6759 					  dev->name);
6760 				s2io_stop_all_tx_queue(nic);
6761 			}
6762 		}
6763 		val64 = readq(&bar0->adapter_control);
6764 		val64 |= ADAPTER_LED_ON;
6765 		writeq(val64, &bar0->adapter_control);
6766 		s2io_link(nic, LINK_UP);
6767 	} else {
6768 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6769 						      subid)) {
6770 			val64 = readq(&bar0->gpio_control);
6771 			val64 &= ~GPIO_CTRL_GPIO_0;
6772 			writeq(val64, &bar0->gpio_control);
6773 			val64 = readq(&bar0->gpio_control);
6774 		}
6775 		/* turn off LED */
6776 		val64 = readq(&bar0->adapter_control);
6777 		val64 = val64 & (~ADAPTER_LED_ON);
6778 		writeq(val64, &bar0->adapter_control);
6779 		s2io_link(nic, LINK_DOWN);
6780 	}
6781 	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6782 
6783 out_unlock:
6784 	rtnl_unlock();
6785 }
6786 
6787 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6788 				  struct buffAdd *ba,
6789 				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
6790 				  u64 *temp2, int size)
6791 {
6792 	struct net_device *dev = sp->dev;
6793 	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6794 
6795 	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6796 		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6797 		/* allocate skb */
6798 		if (*skb) {
6799 			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6800 			/*
6801 			 * As Rx frame are not going to be processed,
6802 			 * using same mapped address for the Rxd
6803 			 * buffer pointer
6804 			 */
6805 			rxdp1->Buffer0_ptr = *temp0;
6806 		} else {
6807 			*skb = netdev_alloc_skb(dev, size);
6808 			if (!(*skb)) {
6809 				DBG_PRINT(INFO_DBG,
6810 					  "%s: Out of memory to allocate %s\n",
6811 					  dev->name, "1 buf mode SKBs");
6812 				stats->mem_alloc_fail_cnt++;
6813 				return -ENOMEM ;
6814 			}
6815 			stats->mem_allocated += (*skb)->truesize;
6816 			/* storing the mapped addr in a temp variable
6817 			 * such it will be used for next rxd whose
6818 			 * Host Control is NULL
6819 			 */
6820 			rxdp1->Buffer0_ptr = *temp0 =
6821 				pci_map_single(sp->pdev, (*skb)->data,
6822 					       size - NET_IP_ALIGN,
6823 					       PCI_DMA_FROMDEVICE);
6824 			if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6825 				goto memalloc_failed;
6826 			rxdp->Host_Control = (unsigned long) (*skb);
6827 		}
6828 	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6829 		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6830 		/* Two buffer Mode */
6831 		if (*skb) {
6832 			rxdp3->Buffer2_ptr = *temp2;
6833 			rxdp3->Buffer0_ptr = *temp0;
6834 			rxdp3->Buffer1_ptr = *temp1;
6835 		} else {
6836 			*skb = netdev_alloc_skb(dev, size);
6837 			if (!(*skb)) {
6838 				DBG_PRINT(INFO_DBG,
6839 					  "%s: Out of memory to allocate %s\n",
6840 					  dev->name,
6841 					  "2 buf mode SKBs");
6842 				stats->mem_alloc_fail_cnt++;
6843 				return -ENOMEM;
6844 			}
6845 			stats->mem_allocated += (*skb)->truesize;
6846 			rxdp3->Buffer2_ptr = *temp2 =
6847 				pci_map_single(sp->pdev, (*skb)->data,
6848 					       dev->mtu + 4,
6849 					       PCI_DMA_FROMDEVICE);
6850 			if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6851 				goto memalloc_failed;
6852 			rxdp3->Buffer0_ptr = *temp0 =
6853 				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6854 					       PCI_DMA_FROMDEVICE);
6855 			if (pci_dma_mapping_error(sp->pdev,
6856 						  rxdp3->Buffer0_ptr)) {
6857 				pci_unmap_single(sp->pdev,
6858 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6859 						 dev->mtu + 4,
6860 						 PCI_DMA_FROMDEVICE);
6861 				goto memalloc_failed;
6862 			}
6863 			rxdp->Host_Control = (unsigned long) (*skb);
6864 
6865 			/* Buffer-1 will be dummy buffer not used */
6866 			rxdp3->Buffer1_ptr = *temp1 =
6867 				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6868 					       PCI_DMA_FROMDEVICE);
6869 			if (pci_dma_mapping_error(sp->pdev,
6870 						  rxdp3->Buffer1_ptr)) {
6871 				pci_unmap_single(sp->pdev,
6872 						 (dma_addr_t)rxdp3->Buffer0_ptr,
6873 						 BUF0_LEN, PCI_DMA_FROMDEVICE);
6874 				pci_unmap_single(sp->pdev,
6875 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6876 						 dev->mtu + 4,
6877 						 PCI_DMA_FROMDEVICE);
6878 				goto memalloc_failed;
6879 			}
6880 		}
6881 	}
6882 	return 0;
6883 
6884 memalloc_failed:
6885 	stats->pci_map_fail_cnt++;
6886 	stats->mem_freed += (*skb)->truesize;
6887 	dev_kfree_skb(*skb);
6888 	return -ENOMEM;
6889 }
6890 
6891 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6892 				int size)
6893 {
6894 	struct net_device *dev = sp->dev;
6895 	if (sp->rxd_mode == RXD_MODE_1) {
6896 		rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6897 	} else if (sp->rxd_mode == RXD_MODE_3B) {
6898 		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6899 		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6900 		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6901 	}
6902 }
6903 
6904 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6905 {
6906 	int i, j, k, blk_cnt = 0, size;
6907 	struct config_param *config = &sp->config;
6908 	struct mac_info *mac_control = &sp->mac_control;
6909 	struct net_device *dev = sp->dev;
6910 	struct RxD_t *rxdp = NULL;
6911 	struct sk_buff *skb = NULL;
6912 	struct buffAdd *ba = NULL;
6913 	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6914 
6915 	/* Calculate the size based on ring mode */
6916 	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6917 		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6918 	if (sp->rxd_mode == RXD_MODE_1)
6919 		size += NET_IP_ALIGN;
6920 	else if (sp->rxd_mode == RXD_MODE_3B)
6921 		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6922 
6923 	for (i = 0; i < config->rx_ring_num; i++) {
6924 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6925 		struct ring_info *ring = &mac_control->rings[i];
6926 
6927 		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6928 
6929 		for (j = 0; j < blk_cnt; j++) {
6930 			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6931 				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6932 				if (sp->rxd_mode == RXD_MODE_3B)
6933 					ba = &ring->ba[j][k];
6934 				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6935 							   &temp0_64,
6936 							   &temp1_64,
6937 							   &temp2_64,
6938 							   size) == -ENOMEM) {
6939 					return 0;
6940 				}
6941 
6942 				set_rxd_buffer_size(sp, rxdp, size);
6943 				dma_wmb();
6944 				/* flip the Ownership bit to Hardware */
6945 				rxdp->Control_1 |= RXD_OWN_XENA;
6946 			}
6947 		}
6948 	}
6949 	return 0;
6950 
6951 }
6952 
6953 static int s2io_add_isr(struct s2io_nic *sp)
6954 {
6955 	int ret = 0;
6956 	struct net_device *dev = sp->dev;
6957 	int err = 0;
6958 
6959 	if (sp->config.intr_type == MSI_X)
6960 		ret = s2io_enable_msi_x(sp);
6961 	if (ret) {
6962 		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6963 		sp->config.intr_type = INTA;
6964 	}
6965 
6966 	/*
6967 	 * Store the values of the MSIX table in
6968 	 * the struct s2io_nic structure
6969 	 */
6970 	store_xmsi_data(sp);
6971 
6972 	/* After proper initialization of H/W, register ISR */
6973 	if (sp->config.intr_type == MSI_X) {
6974 		int i, msix_rx_cnt = 0;
6975 
6976 		for (i = 0; i < sp->num_entries; i++) {
6977 			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6978 				if (sp->s2io_entries[i].type ==
6979 				    MSIX_RING_TYPE) {
6980 					snprintf(sp->desc[i],
6981 						sizeof(sp->desc[i]),
6982 						"%s:MSI-X-%d-RX",
6983 						dev->name, i);
6984 					err = request_irq(sp->entries[i].vector,
6985 							  s2io_msix_ring_handle,
6986 							  0,
6987 							  sp->desc[i],
6988 							  sp->s2io_entries[i].arg);
6989 				} else if (sp->s2io_entries[i].type ==
6990 					   MSIX_ALARM_TYPE) {
6991 					snprintf(sp->desc[i],
6992 						sizeof(sp->desc[i]),
6993 						"%s:MSI-X-%d-TX",
6994 						dev->name, i);
6995 					err = request_irq(sp->entries[i].vector,
6996 							  s2io_msix_fifo_handle,
6997 							  0,
6998 							  sp->desc[i],
6999 							  sp->s2io_entries[i].arg);
7000 
7001 				}
7002 				/* if either data or addr is zero print it. */
7003 				if (!(sp->msix_info[i].addr &&
7004 				      sp->msix_info[i].data)) {
7005 					DBG_PRINT(ERR_DBG,
7006 						  "%s @Addr:0x%llx Data:0x%llx\n",
7007 						  sp->desc[i],
7008 						  (unsigned long long)
7009 						  sp->msix_info[i].addr,
7010 						  (unsigned long long)
7011 						  ntohl(sp->msix_info[i].data));
7012 				} else
7013 					msix_rx_cnt++;
7014 				if (err) {
7015 					remove_msix_isr(sp);
7016 
7017 					DBG_PRINT(ERR_DBG,
7018 						  "%s:MSI-X-%d registration "
7019 						  "failed\n", dev->name, i);
7020 
7021 					DBG_PRINT(ERR_DBG,
7022 						  "%s: Defaulting to INTA\n",
7023 						  dev->name);
7024 					sp->config.intr_type = INTA;
7025 					break;
7026 				}
7027 				sp->s2io_entries[i].in_use =
7028 					MSIX_REGISTERED_SUCCESS;
7029 			}
7030 		}
7031 		if (!err) {
7032 			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
7033 			DBG_PRINT(INFO_DBG,
7034 				  "MSI-X-TX entries enabled through alarm vector\n");
7035 		}
7036 	}
7037 	if (sp->config.intr_type == INTA) {
7038 		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
7039 				  sp->name, dev);
7040 		if (err) {
7041 			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7042 				  dev->name);
7043 			return -1;
7044 		}
7045 	}
7046 	return 0;
7047 }
7048 
7049 static void s2io_rem_isr(struct s2io_nic *sp)
7050 {
7051 	if (sp->config.intr_type == MSI_X)
7052 		remove_msix_isr(sp);
7053 	else
7054 		remove_inta_isr(sp);
7055 }
7056 
7057 static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7058 {
7059 	int cnt = 0;
7060 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
7061 	register u64 val64 = 0;
7062 	struct config_param *config;
7063 	config = &sp->config;
7064 
7065 	if (!is_s2io_card_up(sp))
7066 		return;
7067 
7068 	del_timer_sync(&sp->alarm_timer);
7069 	/* If s2io_set_link task is executing, wait till it completes. */
7070 	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7071 		msleep(50);
7072 	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7073 
7074 	/* Disable napi */
7075 	if (sp->config.napi) {
7076 		int off = 0;
7077 		if (config->intr_type ==  MSI_X) {
7078 			for (; off < sp->config.rx_ring_num; off++)
7079 				napi_disable(&sp->mac_control.rings[off].napi);
7080 		}
7081 		else
7082 			napi_disable(&sp->napi);
7083 	}
7084 
7085 	/* disable Tx and Rx traffic on the NIC */
7086 	if (do_io)
7087 		stop_nic(sp);
7088 
7089 	s2io_rem_isr(sp);
7090 
7091 	/* stop the tx queue, indicate link down */
7092 	s2io_link(sp, LINK_DOWN);
7093 
7094 	/* Check if the device is Quiescent and then Reset the NIC */
7095 	while (do_io) {
7096 		/* As per the HW requirement we need to replenish the
7097 		 * receive buffer to avoid the ring bump. Since there is
7098 		 * no intention of processing the Rx frame at this pointwe are
7099 		 * just setting the ownership bit of rxd in Each Rx
7100 		 * ring to HW and set the appropriate buffer size
7101 		 * based on the ring mode
7102 		 */
7103 		rxd_owner_bit_reset(sp);
7104 
7105 		val64 = readq(&bar0->adapter_status);
7106 		if (verify_xena_quiescence(sp)) {
7107 			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7108 				break;
7109 		}
7110 
7111 		msleep(50);
7112 		cnt++;
7113 		if (cnt == 10) {
7114 			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7115 				  "adapter status reads 0x%llx\n",
7116 				  (unsigned long long)val64);
7117 			break;
7118 		}
7119 	}
7120 	if (do_io)
7121 		s2io_reset(sp);
7122 
7123 	/* Free all Tx buffers */
7124 	free_tx_buffers(sp);
7125 
7126 	/* Free all Rx buffers */
7127 	free_rx_buffers(sp);
7128 
7129 	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7130 }
7131 
7132 static void s2io_card_down(struct s2io_nic *sp)
7133 {
7134 	do_s2io_card_down(sp, 1);
7135 }
7136 
7137 static int s2io_card_up(struct s2io_nic *sp)
7138 {
7139 	int i, ret = 0;
7140 	struct config_param *config;
7141 	struct mac_info *mac_control;
7142 	struct net_device *dev = sp->dev;
7143 	u16 interruptible;
7144 
7145 	/* Initialize the H/W I/O registers */
7146 	ret = init_nic(sp);
7147 	if (ret != 0) {
7148 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7149 			  dev->name);
7150 		if (ret != -EIO)
7151 			s2io_reset(sp);
7152 		return ret;
7153 	}
7154 
7155 	/*
7156 	 * Initializing the Rx buffers. For now we are considering only 1
7157 	 * Rx ring and initializing buffers into 30 Rx blocks
7158 	 */
7159 	config = &sp->config;
7160 	mac_control = &sp->mac_control;
7161 
7162 	for (i = 0; i < config->rx_ring_num; i++) {
7163 		struct ring_info *ring = &mac_control->rings[i];
7164 
7165 		ring->mtu = dev->mtu;
7166 		ring->lro = !!(dev->features & NETIF_F_LRO);
7167 		ret = fill_rx_buffers(sp, ring, 1);
7168 		if (ret) {
7169 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7170 				  dev->name);
7171 			s2io_reset(sp);
7172 			free_rx_buffers(sp);
7173 			return -ENOMEM;
7174 		}
7175 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7176 			  ring->rx_bufs_left);
7177 	}
7178 
7179 	/* Initialise napi */
7180 	if (config->napi) {
7181 		if (config->intr_type ==  MSI_X) {
7182 			for (i = 0; i < sp->config.rx_ring_num; i++)
7183 				napi_enable(&sp->mac_control.rings[i].napi);
7184 		} else {
7185 			napi_enable(&sp->napi);
7186 		}
7187 	}
7188 
7189 	/* Maintain the state prior to the open */
7190 	if (sp->promisc_flg)
7191 		sp->promisc_flg = 0;
7192 	if (sp->m_cast_flg) {
7193 		sp->m_cast_flg = 0;
7194 		sp->all_multi_pos = 0;
7195 	}
7196 
7197 	/* Setting its receive mode */
7198 	s2io_set_multicast(dev);
7199 
7200 	if (dev->features & NETIF_F_LRO) {
7201 		/* Initialize max aggregatable pkts per session based on MTU */
7202 		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7203 		/* Check if we can use (if specified) user provided value */
7204 		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7205 			sp->lro_max_aggr_per_sess = lro_max_pkts;
7206 	}
7207 
7208 	/* Enable Rx Traffic and interrupts on the NIC */
7209 	if (start_nic(sp)) {
7210 		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7211 		s2io_reset(sp);
7212 		free_rx_buffers(sp);
7213 		return -ENODEV;
7214 	}
7215 
7216 	/* Add interrupt service routine */
7217 	if (s2io_add_isr(sp) != 0) {
7218 		if (sp->config.intr_type == MSI_X)
7219 			s2io_rem_isr(sp);
7220 		s2io_reset(sp);
7221 		free_rx_buffers(sp);
7222 		return -ENODEV;
7223 	}
7224 
7225 	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7226 
7227 	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7228 
7229 	/*  Enable select interrupts */
7230 	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7231 	if (sp->config.intr_type != INTA) {
7232 		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7233 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7234 	} else {
7235 		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7236 		interruptible |= TX_PIC_INTR;
7237 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7238 	}
7239 
7240 	return 0;
7241 }
7242 
7243 /**
7244  * s2io_restart_nic - Resets the NIC.
7245  * @data : long pointer to the device private structure
7246  * Description:
7247  * This function is scheduled to be run by the s2io_tx_watchdog
7248  * function after 0.5 secs to reset the NIC. The idea is to reduce
7249  * the run time of the watch dog routine which is run holding a
7250  * spin lock.
7251  */
7252 
7253 static void s2io_restart_nic(struct work_struct *work)
7254 {
7255 	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7256 	struct net_device *dev = sp->dev;
7257 
7258 	rtnl_lock();
7259 
7260 	if (!netif_running(dev))
7261 		goto out_unlock;
7262 
7263 	s2io_card_down(sp);
7264 	if (s2io_card_up(sp)) {
7265 		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7266 	}
7267 	s2io_wake_all_tx_queue(sp);
7268 	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7269 out_unlock:
7270 	rtnl_unlock();
7271 }
7272 
7273 /**
7274  *  s2io_tx_watchdog - Watchdog for transmit side.
7275  *  @dev : Pointer to net device structure
7276  *  Description:
7277  *  This function is triggered if the Tx Queue is stopped
7278  *  for a pre-defined amount of time when the Interface is still up.
7279  *  If the Interface is jammed in such a situation, the hardware is
7280  *  reset (by s2io_close) and restarted again (by s2io_open) to
7281  *  overcome any problem that might have been caused in the hardware.
7282  *  Return value:
7283  *  void
7284  */
7285 
7286 static void s2io_tx_watchdog(struct net_device *dev)
7287 {
7288 	struct s2io_nic *sp = netdev_priv(dev);
7289 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7290 
7291 	if (netif_carrier_ok(dev)) {
7292 		swstats->watchdog_timer_cnt++;
7293 		schedule_work(&sp->rst_timer_task);
7294 		swstats->soft_reset_cnt++;
7295 	}
7296 }
7297 
7298 /**
7299  *   rx_osm_handler - To perform some OS related operations on SKB.
7300  *   @sp: private member of the device structure,pointer to s2io_nic structure.
7301  *   @skb : the socket buffer pointer.
7302  *   @len : length of the packet
7303  *   @cksum : FCS checksum of the frame.
7304  *   @ring_no : the ring from which this RxD was extracted.
7305  *   Description:
7306  *   This function is called by the Rx interrupt serivce routine to perform
7307  *   some OS related operations on the SKB before passing it to the upper
7308  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7309  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7310  *   to the upper layer. If the checksum is wrong, it increments the Rx
7311  *   packet error count, frees the SKB and returns error.
7312  *   Return value:
7313  *   SUCCESS on success and -1 on failure.
7314  */
7315 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7316 {
7317 	struct s2io_nic *sp = ring_data->nic;
7318 	struct net_device *dev = ring_data->dev;
7319 	struct sk_buff *skb = (struct sk_buff *)
7320 		((unsigned long)rxdp->Host_Control);
7321 	int ring_no = ring_data->ring_no;
7322 	u16 l3_csum, l4_csum;
7323 	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7324 	struct lro *uninitialized_var(lro);
7325 	u8 err_mask;
7326 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7327 
7328 	skb->dev = dev;
7329 
7330 	if (err) {
7331 		/* Check for parity error */
7332 		if (err & 0x1)
7333 			swstats->parity_err_cnt++;
7334 
7335 		err_mask = err >> 48;
7336 		switch (err_mask) {
7337 		case 1:
7338 			swstats->rx_parity_err_cnt++;
7339 			break;
7340 
7341 		case 2:
7342 			swstats->rx_abort_cnt++;
7343 			break;
7344 
7345 		case 3:
7346 			swstats->rx_parity_abort_cnt++;
7347 			break;
7348 
7349 		case 4:
7350 			swstats->rx_rda_fail_cnt++;
7351 			break;
7352 
7353 		case 5:
7354 			swstats->rx_unkn_prot_cnt++;
7355 			break;
7356 
7357 		case 6:
7358 			swstats->rx_fcs_err_cnt++;
7359 			break;
7360 
7361 		case 7:
7362 			swstats->rx_buf_size_err_cnt++;
7363 			break;
7364 
7365 		case 8:
7366 			swstats->rx_rxd_corrupt_cnt++;
7367 			break;
7368 
7369 		case 15:
7370 			swstats->rx_unkn_err_cnt++;
7371 			break;
7372 		}
7373 		/*
7374 		 * Drop the packet if bad transfer code. Exception being
7375 		 * 0x5, which could be due to unsupported IPv6 extension header.
7376 		 * In this case, we let stack handle the packet.
7377 		 * Note that in this case, since checksum will be incorrect,
7378 		 * stack will validate the same.
7379 		 */
7380 		if (err_mask != 0x5) {
7381 			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7382 				  dev->name, err_mask);
7383 			dev->stats.rx_crc_errors++;
7384 			swstats->mem_freed
7385 				+= skb->truesize;
7386 			dev_kfree_skb(skb);
7387 			ring_data->rx_bufs_left -= 1;
7388 			rxdp->Host_Control = 0;
7389 			return 0;
7390 		}
7391 	}
7392 
7393 	rxdp->Host_Control = 0;
7394 	if (sp->rxd_mode == RXD_MODE_1) {
7395 		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7396 
7397 		skb_put(skb, len);
7398 	} else if (sp->rxd_mode == RXD_MODE_3B) {
7399 		int get_block = ring_data->rx_curr_get_info.block_index;
7400 		int get_off = ring_data->rx_curr_get_info.offset;
7401 		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7402 		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7403 		unsigned char *buff = skb_push(skb, buf0_len);
7404 
7405 		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7406 		memcpy(buff, ba->ba_0, buf0_len);
7407 		skb_put(skb, buf2_len);
7408 	}
7409 
7410 	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7411 	    ((!ring_data->lro) ||
7412 	     (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
7413 	    (dev->features & NETIF_F_RXCSUM)) {
7414 		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7415 		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7416 		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7417 			/*
7418 			 * NIC verifies if the Checksum of the received
7419 			 * frame is Ok or not and accordingly returns
7420 			 * a flag in the RxD.
7421 			 */
7422 			skb->ip_summed = CHECKSUM_UNNECESSARY;
7423 			if (ring_data->lro) {
7424 				u32 tcp_len = 0;
7425 				u8 *tcp;
7426 				int ret = 0;
7427 
7428 				ret = s2io_club_tcp_session(ring_data,
7429 							    skb->data, &tcp,
7430 							    &tcp_len, &lro,
7431 							    rxdp, sp);
7432 				switch (ret) {
7433 				case 3: /* Begin anew */
7434 					lro->parent = skb;
7435 					goto aggregate;
7436 				case 1: /* Aggregate */
7437 					lro_append_pkt(sp, lro, skb, tcp_len);
7438 					goto aggregate;
7439 				case 4: /* Flush session */
7440 					lro_append_pkt(sp, lro, skb, tcp_len);
7441 					queue_rx_frame(lro->parent,
7442 						       lro->vlan_tag);
7443 					clear_lro_session(lro);
7444 					swstats->flush_max_pkts++;
7445 					goto aggregate;
7446 				case 2: /* Flush both */
7447 					lro->parent->data_len = lro->frags_len;
7448 					swstats->sending_both++;
7449 					queue_rx_frame(lro->parent,
7450 						       lro->vlan_tag);
7451 					clear_lro_session(lro);
7452 					goto send_up;
7453 				case 0: /* sessions exceeded */
7454 				case -1: /* non-TCP or not L2 aggregatable */
7455 				case 5: /*
7456 					 * First pkt in session not
7457 					 * L3/L4 aggregatable
7458 					 */
7459 					break;
7460 				default:
7461 					DBG_PRINT(ERR_DBG,
7462 						  "%s: Samadhana!!\n",
7463 						  __func__);
7464 					BUG();
7465 				}
7466 			}
7467 		} else {
7468 			/*
7469 			 * Packet with erroneous checksum, let the
7470 			 * upper layers deal with it.
7471 			 */
7472 			skb_checksum_none_assert(skb);
7473 		}
7474 	} else
7475 		skb_checksum_none_assert(skb);
7476 
7477 	swstats->mem_freed += skb->truesize;
7478 send_up:
7479 	skb_record_rx_queue(skb, ring_no);
7480 	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7481 aggregate:
7482 	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7483 	return SUCCESS;
7484 }
7485 
7486 /**
7487  *  s2io_link - stops/starts the Tx queue.
7488  *  @sp : private member of the device structure, which is a pointer to the
7489  *  s2io_nic structure.
7490  *  @link : inidicates whether link is UP/DOWN.
7491  *  Description:
7492  *  This function stops/starts the Tx queue depending on whether the link
7493  *  status of the NIC is is down or up. This is called by the Alarm
7494  *  interrupt handler whenever a link change interrupt comes up.
7495  *  Return value:
7496  *  void.
7497  */
7498 
7499 static void s2io_link(struct s2io_nic *sp, int link)
7500 {
7501 	struct net_device *dev = sp->dev;
7502 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7503 
7504 	if (link != sp->last_link_state) {
7505 		init_tti(sp, link);
7506 		if (link == LINK_DOWN) {
7507 			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7508 			s2io_stop_all_tx_queue(sp);
7509 			netif_carrier_off(dev);
7510 			if (swstats->link_up_cnt)
7511 				swstats->link_up_time =
7512 					jiffies - sp->start_time;
7513 			swstats->link_down_cnt++;
7514 		} else {
7515 			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7516 			if (swstats->link_down_cnt)
7517 				swstats->link_down_time =
7518 					jiffies - sp->start_time;
7519 			swstats->link_up_cnt++;
7520 			netif_carrier_on(dev);
7521 			s2io_wake_all_tx_queue(sp);
7522 		}
7523 	}
7524 	sp->last_link_state = link;
7525 	sp->start_time = jiffies;
7526 }
7527 
7528 /**
7529  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7530  *  @sp : private member of the device structure, which is a pointer to the
7531  *  s2io_nic structure.
7532  *  Description:
7533  *  This function initializes a few of the PCI and PCI-X configuration registers
7534  *  with recommended values.
7535  *  Return value:
7536  *  void
7537  */
7538 
7539 static void s2io_init_pci(struct s2io_nic *sp)
7540 {
7541 	u16 pci_cmd = 0, pcix_cmd = 0;
7542 
7543 	/* Enable Data Parity Error Recovery in PCI-X command register. */
7544 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7545 			     &(pcix_cmd));
7546 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7547 			      (pcix_cmd | 1));
7548 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7549 			     &(pcix_cmd));
7550 
7551 	/* Set the PErr Response bit in PCI command register. */
7552 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7553 	pci_write_config_word(sp->pdev, PCI_COMMAND,
7554 			      (pci_cmd | PCI_COMMAND_PARITY));
7555 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7556 }
7557 
7558 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7559 			    u8 *dev_multiq)
7560 {
7561 	int i;
7562 
7563 	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7564 		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7565 			  "(%d) not supported\n", tx_fifo_num);
7566 
7567 		if (tx_fifo_num < 1)
7568 			tx_fifo_num = 1;
7569 		else
7570 			tx_fifo_num = MAX_TX_FIFOS;
7571 
7572 		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7573 	}
7574 
7575 	if (multiq)
7576 		*dev_multiq = multiq;
7577 
7578 	if (tx_steering_type && (1 == tx_fifo_num)) {
7579 		if (tx_steering_type != TX_DEFAULT_STEERING)
7580 			DBG_PRINT(ERR_DBG,
7581 				  "Tx steering is not supported with "
7582 				  "one fifo. Disabling Tx steering.\n");
7583 		tx_steering_type = NO_STEERING;
7584 	}
7585 
7586 	if ((tx_steering_type < NO_STEERING) ||
7587 	    (tx_steering_type > TX_DEFAULT_STEERING)) {
7588 		DBG_PRINT(ERR_DBG,
7589 			  "Requested transmit steering not supported\n");
7590 		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7591 		tx_steering_type = NO_STEERING;
7592 	}
7593 
7594 	if (rx_ring_num > MAX_RX_RINGS) {
7595 		DBG_PRINT(ERR_DBG,
7596 			  "Requested number of rx rings not supported\n");
7597 		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7598 			  MAX_RX_RINGS);
7599 		rx_ring_num = MAX_RX_RINGS;
7600 	}
7601 
7602 	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7603 		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7604 			  "Defaulting to INTA\n");
7605 		*dev_intr_type = INTA;
7606 	}
7607 
7608 	if ((*dev_intr_type == MSI_X) &&
7609 	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7610 	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7611 		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7612 			  "Defaulting to INTA\n");
7613 		*dev_intr_type = INTA;
7614 	}
7615 
7616 	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7617 		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7618 		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7619 		rx_ring_mode = 1;
7620 	}
7621 
7622 	for (i = 0; i < MAX_RX_RINGS; i++)
7623 		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7624 			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7625 				  "supported\nDefaulting to %d\n",
7626 				  MAX_RX_BLOCKS_PER_RING);
7627 			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7628 		}
7629 
7630 	return SUCCESS;
7631 }
7632 
7633 /**
7634  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7635  * or Traffic class respectively.
7636  * @nic: device private variable
7637  * Description: The function configures the receive steering to
7638  * desired receive ring.
7639  * Return Value:  SUCCESS on success and
7640  * '-1' on failure (endian settings incorrect).
7641  */
7642 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7643 {
7644 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
7645 	register u64 val64 = 0;
7646 
7647 	if (ds_codepoint > 63)
7648 		return FAILURE;
7649 
7650 	val64 = RTS_DS_MEM_DATA(ring);
7651 	writeq(val64, &bar0->rts_ds_mem_data);
7652 
7653 	val64 = RTS_DS_MEM_CTRL_WE |
7654 		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7655 		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7656 
7657 	writeq(val64, &bar0->rts_ds_mem_ctrl);
7658 
7659 	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7660 				     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7661 				     S2IO_BIT_RESET);
7662 }
7663 
7664 static const struct net_device_ops s2io_netdev_ops = {
7665 	.ndo_open	        = s2io_open,
7666 	.ndo_stop	        = s2io_close,
7667 	.ndo_get_stats	        = s2io_get_stats,
7668 	.ndo_start_xmit    	= s2io_xmit,
7669 	.ndo_validate_addr	= eth_validate_addr,
7670 	.ndo_set_rx_mode	= s2io_set_multicast,
7671 	.ndo_do_ioctl	   	= s2io_ioctl,
7672 	.ndo_set_mac_address    = s2io_set_mac_addr,
7673 	.ndo_change_mtu	   	= s2io_change_mtu,
7674 	.ndo_set_features	= s2io_set_features,
7675 	.ndo_tx_timeout	   	= s2io_tx_watchdog,
7676 #ifdef CONFIG_NET_POLL_CONTROLLER
7677 	.ndo_poll_controller    = s2io_netpoll,
7678 #endif
7679 };
7680 
7681 /**
7682  *  s2io_init_nic - Initialization of the adapter .
7683  *  @pdev : structure containing the PCI related information of the device.
7684  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7685  *  Description:
7686  *  The function initializes an adapter identified by the pci_dec structure.
7687  *  All OS related initialization including memory and device structure and
7688  *  initlaization of the device private variable is done. Also the swapper
7689  *  control register is initialized to enable read and write into the I/O
7690  *  registers of the device.
7691  *  Return value:
7692  *  returns 0 on success and negative on failure.
7693  */
7694 
7695 static int
7696 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7697 {
7698 	struct s2io_nic *sp;
7699 	struct net_device *dev;
7700 	int i, j, ret;
7701 	int dma_flag = false;
7702 	u32 mac_up, mac_down;
7703 	u64 val64 = 0, tmp64 = 0;
7704 	struct XENA_dev_config __iomem *bar0 = NULL;
7705 	u16 subid;
7706 	struct config_param *config;
7707 	struct mac_info *mac_control;
7708 	int mode;
7709 	u8 dev_intr_type = intr_type;
7710 	u8 dev_multiq = 0;
7711 
7712 	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7713 	if (ret)
7714 		return ret;
7715 
7716 	ret = pci_enable_device(pdev);
7717 	if (ret) {
7718 		DBG_PRINT(ERR_DBG,
7719 			  "%s: pci_enable_device failed\n", __func__);
7720 		return ret;
7721 	}
7722 
7723 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7724 		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7725 		dma_flag = true;
7726 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
7727 			DBG_PRINT(ERR_DBG,
7728 				  "Unable to obtain 64bit DMA "
7729 				  "for consistent allocations\n");
7730 			pci_disable_device(pdev);
7731 			return -ENOMEM;
7732 		}
7733 	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
7734 		DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7735 	} else {
7736 		pci_disable_device(pdev);
7737 		return -ENOMEM;
7738 	}
7739 	ret = pci_request_regions(pdev, s2io_driver_name);
7740 	if (ret) {
7741 		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7742 			  __func__, ret);
7743 		pci_disable_device(pdev);
7744 		return -ENODEV;
7745 	}
7746 	if (dev_multiq)
7747 		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7748 	else
7749 		dev = alloc_etherdev(sizeof(struct s2io_nic));
7750 	if (dev == NULL) {
7751 		pci_disable_device(pdev);
7752 		pci_release_regions(pdev);
7753 		return -ENODEV;
7754 	}
7755 
7756 	pci_set_master(pdev);
7757 	pci_set_drvdata(pdev, dev);
7758 	SET_NETDEV_DEV(dev, &pdev->dev);
7759 
7760 	/*  Private member variable initialized to s2io NIC structure */
7761 	sp = netdev_priv(dev);
7762 	sp->dev = dev;
7763 	sp->pdev = pdev;
7764 	sp->high_dma_flag = dma_flag;
7765 	sp->device_enabled_once = false;
7766 	if (rx_ring_mode == 1)
7767 		sp->rxd_mode = RXD_MODE_1;
7768 	if (rx_ring_mode == 2)
7769 		sp->rxd_mode = RXD_MODE_3B;
7770 
7771 	sp->config.intr_type = dev_intr_type;
7772 
7773 	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7774 	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7775 		sp->device_type = XFRAME_II_DEVICE;
7776 	else
7777 		sp->device_type = XFRAME_I_DEVICE;
7778 
7779 
7780 	/* Initialize some PCI/PCI-X fields of the NIC. */
7781 	s2io_init_pci(sp);
7782 
7783 	/*
7784 	 * Setting the device configuration parameters.
7785 	 * Most of these parameters can be specified by the user during
7786 	 * module insertion as they are module loadable parameters. If
7787 	 * these parameters are not not specified during load time, they
7788 	 * are initialized with default values.
7789 	 */
7790 	config = &sp->config;
7791 	mac_control = &sp->mac_control;
7792 
7793 	config->napi = napi;
7794 	config->tx_steering_type = tx_steering_type;
7795 
7796 	/* Tx side parameters. */
7797 	if (config->tx_steering_type == TX_PRIORITY_STEERING)
7798 		config->tx_fifo_num = MAX_TX_FIFOS;
7799 	else
7800 		config->tx_fifo_num = tx_fifo_num;
7801 
7802 	/* Initialize the fifos used for tx steering */
7803 	if (config->tx_fifo_num < 5) {
7804 		if (config->tx_fifo_num  == 1)
7805 			sp->total_tcp_fifos = 1;
7806 		else
7807 			sp->total_tcp_fifos = config->tx_fifo_num - 1;
7808 		sp->udp_fifo_idx = config->tx_fifo_num - 1;
7809 		sp->total_udp_fifos = 1;
7810 		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7811 	} else {
7812 		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7813 				       FIFO_OTHER_MAX_NUM);
7814 		sp->udp_fifo_idx = sp->total_tcp_fifos;
7815 		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7816 		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7817 	}
7818 
7819 	config->multiq = dev_multiq;
7820 	for (i = 0; i < config->tx_fifo_num; i++) {
7821 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7822 
7823 		tx_cfg->fifo_len = tx_fifo_len[i];
7824 		tx_cfg->fifo_priority = i;
7825 	}
7826 
7827 	/* mapping the QoS priority to the configured fifos */
7828 	for (i = 0; i < MAX_TX_FIFOS; i++)
7829 		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7830 
7831 	/* map the hashing selector table to the configured fifos */
7832 	for (i = 0; i < config->tx_fifo_num; i++)
7833 		sp->fifo_selector[i] = fifo_selector[i];
7834 
7835 
7836 	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7837 	for (i = 0; i < config->tx_fifo_num; i++) {
7838 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7839 
7840 		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7841 		if (tx_cfg->fifo_len < 65) {
7842 			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7843 			break;
7844 		}
7845 	}
7846 	/* + 2 because one Txd for skb->data and one Txd for UFO */
7847 	config->max_txds = MAX_SKB_FRAGS + 2;
7848 
7849 	/* Rx side parameters. */
7850 	config->rx_ring_num = rx_ring_num;
7851 	for (i = 0; i < config->rx_ring_num; i++) {
7852 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7853 		struct ring_info *ring = &mac_control->rings[i];
7854 
7855 		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7856 		rx_cfg->ring_priority = i;
7857 		ring->rx_bufs_left = 0;
7858 		ring->rxd_mode = sp->rxd_mode;
7859 		ring->rxd_count = rxd_count[sp->rxd_mode];
7860 		ring->pdev = sp->pdev;
7861 		ring->dev = sp->dev;
7862 	}
7863 
7864 	for (i = 0; i < rx_ring_num; i++) {
7865 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7866 
7867 		rx_cfg->ring_org = RING_ORG_BUFF1;
7868 		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7869 	}
7870 
7871 	/*  Setting Mac Control parameters */
7872 	mac_control->rmac_pause_time = rmac_pause_time;
7873 	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7874 	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7875 
7876 
7877 	/*  initialize the shared memory used by the NIC and the host */
7878 	if (init_shared_mem(sp)) {
7879 		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7880 		ret = -ENOMEM;
7881 		goto mem_alloc_failed;
7882 	}
7883 
7884 	sp->bar0 = pci_ioremap_bar(pdev, 0);
7885 	if (!sp->bar0) {
7886 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7887 			  dev->name);
7888 		ret = -ENOMEM;
7889 		goto bar0_remap_failed;
7890 	}
7891 
7892 	sp->bar1 = pci_ioremap_bar(pdev, 2);
7893 	if (!sp->bar1) {
7894 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7895 			  dev->name);
7896 		ret = -ENOMEM;
7897 		goto bar1_remap_failed;
7898 	}
7899 
7900 	/* Initializing the BAR1 address as the start of the FIFO pointer. */
7901 	for (j = 0; j < MAX_TX_FIFOS; j++) {
7902 		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7903 	}
7904 
7905 	/*  Driver entry points */
7906 	dev->netdev_ops = &s2io_netdev_ops;
7907 	dev->ethtool_ops = &netdev_ethtool_ops;
7908 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7909 		NETIF_F_TSO | NETIF_F_TSO6 |
7910 		NETIF_F_RXCSUM | NETIF_F_LRO;
7911 	dev->features |= dev->hw_features |
7912 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
7913 	if (sp->device_type & XFRAME_II_DEVICE) {
7914 		dev->hw_features |= NETIF_F_UFO;
7915 		if (ufo)
7916 			dev->features |= NETIF_F_UFO;
7917 	}
7918 	if (sp->high_dma_flag == true)
7919 		dev->features |= NETIF_F_HIGHDMA;
7920 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7921 	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7922 	INIT_WORK(&sp->set_link_task, s2io_set_link);
7923 
7924 	pci_save_state(sp->pdev);
7925 
7926 	/* Setting swapper control on the NIC, for proper reset operation */
7927 	if (s2io_set_swapper(sp)) {
7928 		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7929 			  dev->name);
7930 		ret = -EAGAIN;
7931 		goto set_swap_failed;
7932 	}
7933 
7934 	/* Verify if the Herc works on the slot its placed into */
7935 	if (sp->device_type & XFRAME_II_DEVICE) {
7936 		mode = s2io_verify_pci_mode(sp);
7937 		if (mode < 0) {
7938 			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7939 				  __func__);
7940 			ret = -EBADSLT;
7941 			goto set_swap_failed;
7942 		}
7943 	}
7944 
7945 	if (sp->config.intr_type == MSI_X) {
7946 		sp->num_entries = config->rx_ring_num + 1;
7947 		ret = s2io_enable_msi_x(sp);
7948 
7949 		if (!ret) {
7950 			ret = s2io_test_msi(sp);
7951 			/* rollback MSI-X, will re-enable during add_isr() */
7952 			remove_msix_isr(sp);
7953 		}
7954 		if (ret) {
7955 
7956 			DBG_PRINT(ERR_DBG,
7957 				  "MSI-X requested but failed to enable\n");
7958 			sp->config.intr_type = INTA;
7959 		}
7960 	}
7961 
7962 	if (config->intr_type ==  MSI_X) {
7963 		for (i = 0; i < config->rx_ring_num ; i++) {
7964 			struct ring_info *ring = &mac_control->rings[i];
7965 
7966 			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7967 		}
7968 	} else {
7969 		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7970 	}
7971 
7972 	/* Not needed for Herc */
7973 	if (sp->device_type & XFRAME_I_DEVICE) {
7974 		/*
7975 		 * Fix for all "FFs" MAC address problems observed on
7976 		 * Alpha platforms
7977 		 */
7978 		fix_mac_address(sp);
7979 		s2io_reset(sp);
7980 	}
7981 
7982 	/*
7983 	 * MAC address initialization.
7984 	 * For now only one mac address will be read and used.
7985 	 */
7986 	bar0 = sp->bar0;
7987 	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7988 		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7989 	writeq(val64, &bar0->rmac_addr_cmd_mem);
7990 	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7991 			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
7992 			      S2IO_BIT_RESET);
7993 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
7994 	mac_down = (u32)tmp64;
7995 	mac_up = (u32) (tmp64 >> 32);
7996 
7997 	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7998 	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7999 	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8000 	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8001 	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8002 	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8003 
8004 	/*  Set the factory defined MAC address initially   */
8005 	dev->addr_len = ETH_ALEN;
8006 	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8007 
8008 	/* initialize number of multicast & unicast MAC entries variables */
8009 	if (sp->device_type == XFRAME_I_DEVICE) {
8010 		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8011 		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8012 		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8013 	} else if (sp->device_type == XFRAME_II_DEVICE) {
8014 		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8015 		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8016 		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8017 	}
8018 
8019 	/* MTU range: 46 - 9600 */
8020 	dev->min_mtu = MIN_MTU;
8021 	dev->max_mtu = S2IO_JUMBO_SIZE;
8022 
8023 	/* store mac addresses from CAM to s2io_nic structure */
8024 	do_s2io_store_unicast_mc(sp);
8025 
8026 	/* Configure MSIX vector for number of rings configured plus one */
8027 	if ((sp->device_type == XFRAME_II_DEVICE) &&
8028 	    (config->intr_type == MSI_X))
8029 		sp->num_entries = config->rx_ring_num + 1;
8030 
8031 	/* Store the values of the MSIX table in the s2io_nic structure */
8032 	store_xmsi_data(sp);
8033 	/* reset Nic and bring it to known state */
8034 	s2io_reset(sp);
8035 
8036 	/*
8037 	 * Initialize link state flags
8038 	 * and the card state parameter
8039 	 */
8040 	sp->state = 0;
8041 
8042 	/* Initialize spinlocks */
8043 	for (i = 0; i < sp->config.tx_fifo_num; i++) {
8044 		struct fifo_info *fifo = &mac_control->fifos[i];
8045 
8046 		spin_lock_init(&fifo->tx_lock);
8047 	}
8048 
8049 	/*
8050 	 * SXE-002: Configure link and activity LED to init state
8051 	 * on driver load.
8052 	 */
8053 	subid = sp->pdev->subsystem_device;
8054 	if ((subid & 0xFF) >= 0x07) {
8055 		val64 = readq(&bar0->gpio_control);
8056 		val64 |= 0x0000800000000000ULL;
8057 		writeq(val64, &bar0->gpio_control);
8058 		val64 = 0x0411040400000000ULL;
8059 		writeq(val64, (void __iomem *)bar0 + 0x2700);
8060 		val64 = readq(&bar0->gpio_control);
8061 	}
8062 
8063 	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
8064 
8065 	if (register_netdev(dev)) {
8066 		DBG_PRINT(ERR_DBG, "Device registration failed\n");
8067 		ret = -ENODEV;
8068 		goto register_failed;
8069 	}
8070 	s2io_vpd_read(sp);
8071 	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8072 	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8073 		  sp->product_name, pdev->revision);
8074 	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8075 		  s2io_driver_version);
8076 	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8077 	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8078 	if (sp->device_type & XFRAME_II_DEVICE) {
8079 		mode = s2io_print_pci_mode(sp);
8080 		if (mode < 0) {
8081 			ret = -EBADSLT;
8082 			unregister_netdev(dev);
8083 			goto set_swap_failed;
8084 		}
8085 	}
8086 	switch (sp->rxd_mode) {
8087 	case RXD_MODE_1:
8088 		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8089 			  dev->name);
8090 		break;
8091 	case RXD_MODE_3B:
8092 		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8093 			  dev->name);
8094 		break;
8095 	}
8096 
8097 	switch (sp->config.napi) {
8098 	case 0:
8099 		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8100 		break;
8101 	case 1:
8102 		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8103 		break;
8104 	}
8105 
8106 	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8107 		  sp->config.tx_fifo_num);
8108 
8109 	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8110 		  sp->config.rx_ring_num);
8111 
8112 	switch (sp->config.intr_type) {
8113 	case INTA:
8114 		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8115 		break;
8116 	case MSI_X:
8117 		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8118 		break;
8119 	}
8120 	if (sp->config.multiq) {
8121 		for (i = 0; i < sp->config.tx_fifo_num; i++) {
8122 			struct fifo_info *fifo = &mac_control->fifos[i];
8123 
8124 			fifo->multiq = config->multiq;
8125 		}
8126 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8127 			  dev->name);
8128 	} else
8129 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8130 			  dev->name);
8131 
8132 	switch (sp->config.tx_steering_type) {
8133 	case NO_STEERING:
8134 		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8135 			  dev->name);
8136 		break;
8137 	case TX_PRIORITY_STEERING:
8138 		DBG_PRINT(ERR_DBG,
8139 			  "%s: Priority steering enabled for transmit\n",
8140 			  dev->name);
8141 		break;
8142 	case TX_DEFAULT_STEERING:
8143 		DBG_PRINT(ERR_DBG,
8144 			  "%s: Default steering enabled for transmit\n",
8145 			  dev->name);
8146 	}
8147 
8148 	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8149 		  dev->name);
8150 	if (ufo)
8151 		DBG_PRINT(ERR_DBG,
8152 			  "%s: UDP Fragmentation Offload(UFO) enabled\n",
8153 			  dev->name);
8154 	/* Initialize device name */
8155 	snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8156 		 sp->product_name);
8157 
8158 	if (vlan_tag_strip)
8159 		sp->vlan_strip_flag = 1;
8160 	else
8161 		sp->vlan_strip_flag = 0;
8162 
8163 	/*
8164 	 * Make Link state as off at this point, when the Link change
8165 	 * interrupt comes the state will be automatically changed to
8166 	 * the right state.
8167 	 */
8168 	netif_carrier_off(dev);
8169 
8170 	return 0;
8171 
8172 register_failed:
8173 set_swap_failed:
8174 	iounmap(sp->bar1);
8175 bar1_remap_failed:
8176 	iounmap(sp->bar0);
8177 bar0_remap_failed:
8178 mem_alloc_failed:
8179 	free_shared_mem(sp);
8180 	pci_disable_device(pdev);
8181 	pci_release_regions(pdev);
8182 	free_netdev(dev);
8183 
8184 	return ret;
8185 }
8186 
8187 /**
8188  * s2io_rem_nic - Free the PCI device
8189  * @pdev: structure containing the PCI related information of the device.
8190  * Description: This function is called by the Pci subsystem to release a
8191  * PCI device and free up all resource held up by the device. This could
8192  * be in response to a Hot plug event or when the driver is to be removed
8193  * from memory.
8194  */
8195 
8196 static void s2io_rem_nic(struct pci_dev *pdev)
8197 {
8198 	struct net_device *dev = pci_get_drvdata(pdev);
8199 	struct s2io_nic *sp;
8200 
8201 	if (dev == NULL) {
8202 		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8203 		return;
8204 	}
8205 
8206 	sp = netdev_priv(dev);
8207 
8208 	cancel_work_sync(&sp->rst_timer_task);
8209 	cancel_work_sync(&sp->set_link_task);
8210 
8211 	unregister_netdev(dev);
8212 
8213 	free_shared_mem(sp);
8214 	iounmap(sp->bar0);
8215 	iounmap(sp->bar1);
8216 	pci_release_regions(pdev);
8217 	free_netdev(dev);
8218 	pci_disable_device(pdev);
8219 }
8220 
8221 module_pci_driver(s2io_driver);
8222 
8223 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8224 				struct tcphdr **tcp, struct RxD_t *rxdp,
8225 				struct s2io_nic *sp)
8226 {
8227 	int ip_off;
8228 	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8229 
8230 	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8231 		DBG_PRINT(INIT_DBG,
8232 			  "%s: Non-TCP frames not supported for LRO\n",
8233 			  __func__);
8234 		return -1;
8235 	}
8236 
8237 	/* Checking for DIX type or DIX type with VLAN */
8238 	if ((l2_type == 0) || (l2_type == 4)) {
8239 		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8240 		/*
8241 		 * If vlan stripping is disabled and the frame is VLAN tagged,
8242 		 * shift the offset by the VLAN header size bytes.
8243 		 */
8244 		if ((!sp->vlan_strip_flag) &&
8245 		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8246 			ip_off += HEADER_VLAN_SIZE;
8247 	} else {
8248 		/* LLC, SNAP etc are considered non-mergeable */
8249 		return -1;
8250 	}
8251 
8252 	*ip = (struct iphdr *)(buffer + ip_off);
8253 	ip_len = (u8)((*ip)->ihl);
8254 	ip_len <<= 2;
8255 	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8256 
8257 	return 0;
8258 }
8259 
8260 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8261 				  struct tcphdr *tcp)
8262 {
8263 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8264 	if ((lro->iph->saddr != ip->saddr) ||
8265 	    (lro->iph->daddr != ip->daddr) ||
8266 	    (lro->tcph->source != tcp->source) ||
8267 	    (lro->tcph->dest != tcp->dest))
8268 		return -1;
8269 	return 0;
8270 }
8271 
8272 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8273 {
8274 	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8275 }
8276 
8277 static void initiate_new_session(struct lro *lro, u8 *l2h,
8278 				 struct iphdr *ip, struct tcphdr *tcp,
8279 				 u32 tcp_pyld_len, u16 vlan_tag)
8280 {
8281 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8282 	lro->l2h = l2h;
8283 	lro->iph = ip;
8284 	lro->tcph = tcp;
8285 	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8286 	lro->tcp_ack = tcp->ack_seq;
8287 	lro->sg_num = 1;
8288 	lro->total_len = ntohs(ip->tot_len);
8289 	lro->frags_len = 0;
8290 	lro->vlan_tag = vlan_tag;
8291 	/*
8292 	 * Check if we saw TCP timestamp.
8293 	 * Other consistency checks have already been done.
8294 	 */
8295 	if (tcp->doff == 8) {
8296 		__be32 *ptr;
8297 		ptr = (__be32 *)(tcp+1);
8298 		lro->saw_ts = 1;
8299 		lro->cur_tsval = ntohl(*(ptr+1));
8300 		lro->cur_tsecr = *(ptr+2);
8301 	}
8302 	lro->in_use = 1;
8303 }
8304 
8305 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8306 {
8307 	struct iphdr *ip = lro->iph;
8308 	struct tcphdr *tcp = lro->tcph;
8309 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8310 
8311 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8312 
8313 	/* Update L3 header */
8314 	csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8315 	ip->tot_len = htons(lro->total_len);
8316 
8317 	/* Update L4 header */
8318 	tcp->ack_seq = lro->tcp_ack;
8319 	tcp->window = lro->window;
8320 
8321 	/* Update tsecr field if this session has timestamps enabled */
8322 	if (lro->saw_ts) {
8323 		__be32 *ptr = (__be32 *)(tcp + 1);
8324 		*(ptr+2) = lro->cur_tsecr;
8325 	}
8326 
8327 	/* Update counters required for calculation of
8328 	 * average no. of packets aggregated.
8329 	 */
8330 	swstats->sum_avg_pkts_aggregated += lro->sg_num;
8331 	swstats->num_aggregations++;
8332 }
8333 
8334 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8335 			     struct tcphdr *tcp, u32 l4_pyld)
8336 {
8337 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8338 	lro->total_len += l4_pyld;
8339 	lro->frags_len += l4_pyld;
8340 	lro->tcp_next_seq += l4_pyld;
8341 	lro->sg_num++;
8342 
8343 	/* Update ack seq no. and window ad(from this pkt) in LRO object */
8344 	lro->tcp_ack = tcp->ack_seq;
8345 	lro->window = tcp->window;
8346 
8347 	if (lro->saw_ts) {
8348 		__be32 *ptr;
8349 		/* Update tsecr and tsval from this packet */
8350 		ptr = (__be32 *)(tcp+1);
8351 		lro->cur_tsval = ntohl(*(ptr+1));
8352 		lro->cur_tsecr = *(ptr + 2);
8353 	}
8354 }
8355 
8356 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8357 				    struct tcphdr *tcp, u32 tcp_pyld_len)
8358 {
8359 	u8 *ptr;
8360 
8361 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8362 
8363 	if (!tcp_pyld_len) {
8364 		/* Runt frame or a pure ack */
8365 		return -1;
8366 	}
8367 
8368 	if (ip->ihl != 5) /* IP has options */
8369 		return -1;
8370 
8371 	/* If we see CE codepoint in IP header, packet is not mergeable */
8372 	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8373 		return -1;
8374 
8375 	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8376 	if (tcp->urg || tcp->psh || tcp->rst ||
8377 	    tcp->syn || tcp->fin ||
8378 	    tcp->ece || tcp->cwr || !tcp->ack) {
8379 		/*
8380 		 * Currently recognize only the ack control word and
8381 		 * any other control field being set would result in
8382 		 * flushing the LRO session
8383 		 */
8384 		return -1;
8385 	}
8386 
8387 	/*
8388 	 * Allow only one TCP timestamp option. Don't aggregate if
8389 	 * any other options are detected.
8390 	 */
8391 	if (tcp->doff != 5 && tcp->doff != 8)
8392 		return -1;
8393 
8394 	if (tcp->doff == 8) {
8395 		ptr = (u8 *)(tcp + 1);
8396 		while (*ptr == TCPOPT_NOP)
8397 			ptr++;
8398 		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8399 			return -1;
8400 
8401 		/* Ensure timestamp value increases monotonically */
8402 		if (l_lro)
8403 			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8404 				return -1;
8405 
8406 		/* timestamp echo reply should be non-zero */
8407 		if (*((__be32 *)(ptr+6)) == 0)
8408 			return -1;
8409 	}
8410 
8411 	return 0;
8412 }
8413 
8414 static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
8415 				 u8 **tcp, u32 *tcp_len, struct lro **lro,
8416 				 struct RxD_t *rxdp, struct s2io_nic *sp)
8417 {
8418 	struct iphdr *ip;
8419 	struct tcphdr *tcph;
8420 	int ret = 0, i;
8421 	u16 vlan_tag = 0;
8422 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8423 
8424 	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8425 				   rxdp, sp);
8426 	if (ret)
8427 		return ret;
8428 
8429 	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
8430 
8431 	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8432 	tcph = (struct tcphdr *)*tcp;
8433 	*tcp_len = get_l4_pyld_length(ip, tcph);
8434 	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8435 		struct lro *l_lro = &ring_data->lro0_n[i];
8436 		if (l_lro->in_use) {
8437 			if (check_for_socket_match(l_lro, ip, tcph))
8438 				continue;
8439 			/* Sock pair matched */
8440 			*lro = l_lro;
8441 
8442 			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8443 				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
8444 					  "expected 0x%x, actual 0x%x\n",
8445 					  __func__,
8446 					  (*lro)->tcp_next_seq,
8447 					  ntohl(tcph->seq));
8448 
8449 				swstats->outof_sequence_pkts++;
8450 				ret = 2;
8451 				break;
8452 			}
8453 
8454 			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8455 						      *tcp_len))
8456 				ret = 1; /* Aggregate */
8457 			else
8458 				ret = 2; /* Flush both */
8459 			break;
8460 		}
8461 	}
8462 
8463 	if (ret == 0) {
8464 		/* Before searching for available LRO objects,
8465 		 * check if the pkt is L3/L4 aggregatable. If not
8466 		 * don't create new LRO session. Just send this
8467 		 * packet up.
8468 		 */
8469 		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
8470 			return 5;
8471 
8472 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8473 			struct lro *l_lro = &ring_data->lro0_n[i];
8474 			if (!(l_lro->in_use)) {
8475 				*lro = l_lro;
8476 				ret = 3; /* Begin anew */
8477 				break;
8478 			}
8479 		}
8480 	}
8481 
8482 	if (ret == 0) { /* sessions exceeded */
8483 		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8484 			  __func__);
8485 		*lro = NULL;
8486 		return ret;
8487 	}
8488 
8489 	switch (ret) {
8490 	case 3:
8491 		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8492 				     vlan_tag);
8493 		break;
8494 	case 2:
8495 		update_L3L4_header(sp, *lro);
8496 		break;
8497 	case 1:
8498 		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8499 		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8500 			update_L3L4_header(sp, *lro);
8501 			ret = 4; /* Flush the LRO */
8502 		}
8503 		break;
8504 	default:
8505 		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
8506 		break;
8507 	}
8508 
8509 	return ret;
8510 }
8511 
8512 static void clear_lro_session(struct lro *lro)
8513 {
8514 	static u16 lro_struct_size = sizeof(struct lro);
8515 
8516 	memset(lro, 0, lro_struct_size);
8517 }
8518 
8519 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8520 {
8521 	struct net_device *dev = skb->dev;
8522 	struct s2io_nic *sp = netdev_priv(dev);
8523 
8524 	skb->protocol = eth_type_trans(skb, dev);
8525 	if (vlan_tag && sp->vlan_strip_flag)
8526 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8527 	if (sp->config.napi)
8528 		netif_receive_skb(skb);
8529 	else
8530 		netif_rx(skb);
8531 }
8532 
8533 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8534 			   struct sk_buff *skb, u32 tcp_len)
8535 {
8536 	struct sk_buff *first = lro->parent;
8537 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8538 
8539 	first->len += tcp_len;
8540 	first->data_len = lro->frags_len;
8541 	skb_pull(skb, (skb->len - tcp_len));
8542 	if (skb_shinfo(first)->frag_list)
8543 		lro->last_frag->next = skb;
8544 	else
8545 		skb_shinfo(first)->frag_list = skb;
8546 	first->truesize += skb->truesize;
8547 	lro->last_frag = skb;
8548 	swstats->clubbed_frms_cnt++;
8549 }
8550 
8551 /**
8552  * s2io_io_error_detected - called when PCI error is detected
8553  * @pdev: Pointer to PCI device
8554  * @state: The current pci connection state
8555  *
8556  * This function is called after a PCI bus error affecting
8557  * this device has been detected.
8558  */
8559 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8560 					       pci_channel_state_t state)
8561 {
8562 	struct net_device *netdev = pci_get_drvdata(pdev);
8563 	struct s2io_nic *sp = netdev_priv(netdev);
8564 
8565 	netif_device_detach(netdev);
8566 
8567 	if (state == pci_channel_io_perm_failure)
8568 		return PCI_ERS_RESULT_DISCONNECT;
8569 
8570 	if (netif_running(netdev)) {
8571 		/* Bring down the card, while avoiding PCI I/O */
8572 		do_s2io_card_down(sp, 0);
8573 	}
8574 	pci_disable_device(pdev);
8575 
8576 	return PCI_ERS_RESULT_NEED_RESET;
8577 }
8578 
8579 /**
8580  * s2io_io_slot_reset - called after the pci bus has been reset.
8581  * @pdev: Pointer to PCI device
8582  *
8583  * Restart the card from scratch, as if from a cold-boot.
8584  * At this point, the card has exprienced a hard reset,
8585  * followed by fixups by BIOS, and has its config space
8586  * set up identically to what it was at cold boot.
8587  */
8588 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8589 {
8590 	struct net_device *netdev = pci_get_drvdata(pdev);
8591 	struct s2io_nic *sp = netdev_priv(netdev);
8592 
8593 	if (pci_enable_device(pdev)) {
8594 		pr_err("Cannot re-enable PCI device after reset.\n");
8595 		return PCI_ERS_RESULT_DISCONNECT;
8596 	}
8597 
8598 	pci_set_master(pdev);
8599 	s2io_reset(sp);
8600 
8601 	return PCI_ERS_RESULT_RECOVERED;
8602 }
8603 
8604 /**
8605  * s2io_io_resume - called when traffic can start flowing again.
8606  * @pdev: Pointer to PCI device
8607  *
8608  * This callback is called when the error recovery driver tells
8609  * us that its OK to resume normal operation.
8610  */
8611 static void s2io_io_resume(struct pci_dev *pdev)
8612 {
8613 	struct net_device *netdev = pci_get_drvdata(pdev);
8614 	struct s2io_nic *sp = netdev_priv(netdev);
8615 
8616 	if (netif_running(netdev)) {
8617 		if (s2io_card_up(sp)) {
8618 			pr_err("Can't bring device back up after reset.\n");
8619 			return;
8620 		}
8621 
8622 		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8623 			s2io_card_down(sp);
8624 			pr_err("Can't restore mac addr after reset.\n");
8625 			return;
8626 		}
8627 	}
8628 
8629 	netif_device_attach(netdev);
8630 	netif_tx_wake_all_queues(netdev);
8631 }
8632