1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2010 Exar Corp.
4  *
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik		: For pointing out the improper error condition
15  *			  check in the s2io_xmit routine and also some
16  *			  issues in the Tx watch dog function. Also for
17  *			  patiently answering all those innumerable
18  *			  questions regaring the 2.6 porting issues.
19  * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
20  *			  macros available only in 2.6 Kernel.
21  * Francois Romieu	: For pointing out all code part that were
22  *			  deprecated and also styling related comments.
23  * Grant Grundler	: For helping me get rid of some Architecture
24  *			  dependent code.
25  * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *		values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_max_pkts: This parameter defines maximum number of packets can be
42  *     aggregated as a single large packet
43  * napi: This parameter used to enable/disable NAPI (polling Rx)
44  *     Possible values '1' for enable and '0' for disable. Default is '1'
45  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
46  *                 Possible values '1' for enable , '0' for disable.
47  *                 Default is '2' - which means disable in promisc mode
48  *                 and enable in non-promiscuous mode.
49  * multiq: This parameter used to enable/disable MULTIQUEUE support.
50  *      Possible values '1' for enable and '0' for disable. Default is '0'
51  ************************************************************************/
52 
53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
54 
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/mdio.h>
65 #include <linux/skbuff.h>
66 #include <linux/init.h>
67 #include <linux/delay.h>
68 #include <linux/stddef.h>
69 #include <linux/ioctl.h>
70 #include <linux/timex.h>
71 #include <linux/ethtool.h>
72 #include <linux/workqueue.h>
73 #include <linux/if_vlan.h>
74 #include <linux/ip.h>
75 #include <linux/tcp.h>
76 #include <linux/uaccess.h>
77 #include <linux/io.h>
78 #include <linux/io-64-nonatomic-lo-hi.h>
79 #include <linux/slab.h>
80 #include <linux/prefetch.h>
81 #include <net/tcp.h>
82 #include <net/checksum.h>
83 
84 #include <asm/div64.h>
85 #include <asm/irq.h>
86 
87 /* local include */
88 #include "s2io.h"
89 #include "s2io-regs.h"
90 
91 #define DRV_VERSION "2.0.26.28"
92 
93 /* S2io Driver name & version. */
94 static const char s2io_driver_name[] = "Neterion";
95 static const char s2io_driver_version[] = DRV_VERSION;
96 
97 static const int rxd_size[2] = {32, 48};
98 static const int rxd_count[2] = {127, 85};
99 
100 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
101 {
102 	int ret;
103 
104 	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
105 	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
106 
107 	return ret;
108 }
109 
110 /*
111  * Cards with following subsystem_id have a link state indication
112  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
113  * macro below identifies these cards given the subsystem_id.
114  */
115 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
116 	(dev_type == XFRAME_I_DEVICE) ?					\
117 	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
118 	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
119 
120 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
121 				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
122 
123 static inline int is_s2io_card_up(const struct s2io_nic *sp)
124 {
125 	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
126 }
127 
128 /* Ethtool related variables and Macros. */
129 static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
130 	"Register test\t(offline)",
131 	"Eeprom test\t(offline)",
132 	"Link test\t(online)",
133 	"RLDRAM test\t(offline)",
134 	"BIST Test\t(offline)"
135 };
136 
137 static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
138 	{"tmac_frms"},
139 	{"tmac_data_octets"},
140 	{"tmac_drop_frms"},
141 	{"tmac_mcst_frms"},
142 	{"tmac_bcst_frms"},
143 	{"tmac_pause_ctrl_frms"},
144 	{"tmac_ttl_octets"},
145 	{"tmac_ucst_frms"},
146 	{"tmac_nucst_frms"},
147 	{"tmac_any_err_frms"},
148 	{"tmac_ttl_less_fb_octets"},
149 	{"tmac_vld_ip_octets"},
150 	{"tmac_vld_ip"},
151 	{"tmac_drop_ip"},
152 	{"tmac_icmp"},
153 	{"tmac_rst_tcp"},
154 	{"tmac_tcp"},
155 	{"tmac_udp"},
156 	{"rmac_vld_frms"},
157 	{"rmac_data_octets"},
158 	{"rmac_fcs_err_frms"},
159 	{"rmac_drop_frms"},
160 	{"rmac_vld_mcst_frms"},
161 	{"rmac_vld_bcst_frms"},
162 	{"rmac_in_rng_len_err_frms"},
163 	{"rmac_out_rng_len_err_frms"},
164 	{"rmac_long_frms"},
165 	{"rmac_pause_ctrl_frms"},
166 	{"rmac_unsup_ctrl_frms"},
167 	{"rmac_ttl_octets"},
168 	{"rmac_accepted_ucst_frms"},
169 	{"rmac_accepted_nucst_frms"},
170 	{"rmac_discarded_frms"},
171 	{"rmac_drop_events"},
172 	{"rmac_ttl_less_fb_octets"},
173 	{"rmac_ttl_frms"},
174 	{"rmac_usized_frms"},
175 	{"rmac_osized_frms"},
176 	{"rmac_frag_frms"},
177 	{"rmac_jabber_frms"},
178 	{"rmac_ttl_64_frms"},
179 	{"rmac_ttl_65_127_frms"},
180 	{"rmac_ttl_128_255_frms"},
181 	{"rmac_ttl_256_511_frms"},
182 	{"rmac_ttl_512_1023_frms"},
183 	{"rmac_ttl_1024_1518_frms"},
184 	{"rmac_ip"},
185 	{"rmac_ip_octets"},
186 	{"rmac_hdr_err_ip"},
187 	{"rmac_drop_ip"},
188 	{"rmac_icmp"},
189 	{"rmac_tcp"},
190 	{"rmac_udp"},
191 	{"rmac_err_drp_udp"},
192 	{"rmac_xgmii_err_sym"},
193 	{"rmac_frms_q0"},
194 	{"rmac_frms_q1"},
195 	{"rmac_frms_q2"},
196 	{"rmac_frms_q3"},
197 	{"rmac_frms_q4"},
198 	{"rmac_frms_q5"},
199 	{"rmac_frms_q6"},
200 	{"rmac_frms_q7"},
201 	{"rmac_full_q0"},
202 	{"rmac_full_q1"},
203 	{"rmac_full_q2"},
204 	{"rmac_full_q3"},
205 	{"rmac_full_q4"},
206 	{"rmac_full_q5"},
207 	{"rmac_full_q6"},
208 	{"rmac_full_q7"},
209 	{"rmac_pause_cnt"},
210 	{"rmac_xgmii_data_err_cnt"},
211 	{"rmac_xgmii_ctrl_err_cnt"},
212 	{"rmac_accepted_ip"},
213 	{"rmac_err_tcp"},
214 	{"rd_req_cnt"},
215 	{"new_rd_req_cnt"},
216 	{"new_rd_req_rtry_cnt"},
217 	{"rd_rtry_cnt"},
218 	{"wr_rtry_rd_ack_cnt"},
219 	{"wr_req_cnt"},
220 	{"new_wr_req_cnt"},
221 	{"new_wr_req_rtry_cnt"},
222 	{"wr_rtry_cnt"},
223 	{"wr_disc_cnt"},
224 	{"rd_rtry_wr_ack_cnt"},
225 	{"txp_wr_cnt"},
226 	{"txd_rd_cnt"},
227 	{"txd_wr_cnt"},
228 	{"rxd_rd_cnt"},
229 	{"rxd_wr_cnt"},
230 	{"txf_rd_cnt"},
231 	{"rxf_wr_cnt"}
232 };
233 
234 static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
235 	{"rmac_ttl_1519_4095_frms"},
236 	{"rmac_ttl_4096_8191_frms"},
237 	{"rmac_ttl_8192_max_frms"},
238 	{"rmac_ttl_gt_max_frms"},
239 	{"rmac_osized_alt_frms"},
240 	{"rmac_jabber_alt_frms"},
241 	{"rmac_gt_max_alt_frms"},
242 	{"rmac_vlan_frms"},
243 	{"rmac_len_discard"},
244 	{"rmac_fcs_discard"},
245 	{"rmac_pf_discard"},
246 	{"rmac_da_discard"},
247 	{"rmac_red_discard"},
248 	{"rmac_rts_discard"},
249 	{"rmac_ingm_full_discard"},
250 	{"link_fault_cnt"}
251 };
252 
253 static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
254 	{"\n DRIVER STATISTICS"},
255 	{"single_bit_ecc_errs"},
256 	{"double_bit_ecc_errs"},
257 	{"parity_err_cnt"},
258 	{"serious_err_cnt"},
259 	{"soft_reset_cnt"},
260 	{"fifo_full_cnt"},
261 	{"ring_0_full_cnt"},
262 	{"ring_1_full_cnt"},
263 	{"ring_2_full_cnt"},
264 	{"ring_3_full_cnt"},
265 	{"ring_4_full_cnt"},
266 	{"ring_5_full_cnt"},
267 	{"ring_6_full_cnt"},
268 	{"ring_7_full_cnt"},
269 	{"alarm_transceiver_temp_high"},
270 	{"alarm_transceiver_temp_low"},
271 	{"alarm_laser_bias_current_high"},
272 	{"alarm_laser_bias_current_low"},
273 	{"alarm_laser_output_power_high"},
274 	{"alarm_laser_output_power_low"},
275 	{"warn_transceiver_temp_high"},
276 	{"warn_transceiver_temp_low"},
277 	{"warn_laser_bias_current_high"},
278 	{"warn_laser_bias_current_low"},
279 	{"warn_laser_output_power_high"},
280 	{"warn_laser_output_power_low"},
281 	{"lro_aggregated_pkts"},
282 	{"lro_flush_both_count"},
283 	{"lro_out_of_sequence_pkts"},
284 	{"lro_flush_due_to_max_pkts"},
285 	{"lro_avg_aggr_pkts"},
286 	{"mem_alloc_fail_cnt"},
287 	{"pci_map_fail_cnt"},
288 	{"watchdog_timer_cnt"},
289 	{"mem_allocated"},
290 	{"mem_freed"},
291 	{"link_up_cnt"},
292 	{"link_down_cnt"},
293 	{"link_up_time"},
294 	{"link_down_time"},
295 	{"tx_tcode_buf_abort_cnt"},
296 	{"tx_tcode_desc_abort_cnt"},
297 	{"tx_tcode_parity_err_cnt"},
298 	{"tx_tcode_link_loss_cnt"},
299 	{"tx_tcode_list_proc_err_cnt"},
300 	{"rx_tcode_parity_err_cnt"},
301 	{"rx_tcode_abort_cnt"},
302 	{"rx_tcode_parity_abort_cnt"},
303 	{"rx_tcode_rda_fail_cnt"},
304 	{"rx_tcode_unkn_prot_cnt"},
305 	{"rx_tcode_fcs_err_cnt"},
306 	{"rx_tcode_buf_size_err_cnt"},
307 	{"rx_tcode_rxd_corrupt_cnt"},
308 	{"rx_tcode_unkn_err_cnt"},
309 	{"tda_err_cnt"},
310 	{"pfc_err_cnt"},
311 	{"pcc_err_cnt"},
312 	{"tti_err_cnt"},
313 	{"tpa_err_cnt"},
314 	{"sm_err_cnt"},
315 	{"lso_err_cnt"},
316 	{"mac_tmac_err_cnt"},
317 	{"mac_rmac_err_cnt"},
318 	{"xgxs_txgxs_err_cnt"},
319 	{"xgxs_rxgxs_err_cnt"},
320 	{"rc_err_cnt"},
321 	{"prc_pcix_err_cnt"},
322 	{"rpa_err_cnt"},
323 	{"rda_err_cnt"},
324 	{"rti_err_cnt"},
325 	{"mc_err_cnt"}
326 };
327 
328 #define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
329 #define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
330 #define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)
331 
332 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
333 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
334 
335 #define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
336 #define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
337 
338 #define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
339 #define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
340 
341 /* copy mac addr to def_mac_addr array */
342 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
343 {
344 	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
345 	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
346 	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
347 	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
348 	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
349 	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
350 }
351 
352 /*
353  * Constants to be programmed into the Xena's registers, to configure
354  * the XAUI.
355  */
356 
357 #define	END_SIGN	0x0
358 static const u64 herc_act_dtx_cfg[] = {
359 	/* Set address */
360 	0x8000051536750000ULL, 0x80000515367500E0ULL,
361 	/* Write data */
362 	0x8000051536750004ULL, 0x80000515367500E4ULL,
363 	/* Set address */
364 	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
365 	/* Write data */
366 	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
367 	/* Set address */
368 	0x801205150D440000ULL, 0x801205150D4400E0ULL,
369 	/* Write data */
370 	0x801205150D440004ULL, 0x801205150D4400E4ULL,
371 	/* Set address */
372 	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
373 	/* Write data */
374 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
375 	/* Done */
376 	END_SIGN
377 };
378 
379 static const u64 xena_dtx_cfg[] = {
380 	/* Set address */
381 	0x8000051500000000ULL, 0x80000515000000E0ULL,
382 	/* Write data */
383 	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
384 	/* Set address */
385 	0x8001051500000000ULL, 0x80010515000000E0ULL,
386 	/* Write data */
387 	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
388 	/* Set address */
389 	0x8002051500000000ULL, 0x80020515000000E0ULL,
390 	/* Write data */
391 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
392 	END_SIGN
393 };
394 
395 /*
396  * Constants for Fixing the MacAddress problem seen mostly on
397  * Alpha machines.
398  */
399 static const u64 fix_mac[] = {
400 	0x0060000000000000ULL, 0x0060600000000000ULL,
401 	0x0040600000000000ULL, 0x0000600000000000ULL,
402 	0x0020600000000000ULL, 0x0060600000000000ULL,
403 	0x0020600000000000ULL, 0x0060600000000000ULL,
404 	0x0020600000000000ULL, 0x0060600000000000ULL,
405 	0x0020600000000000ULL, 0x0060600000000000ULL,
406 	0x0020600000000000ULL, 0x0060600000000000ULL,
407 	0x0020600000000000ULL, 0x0060600000000000ULL,
408 	0x0020600000000000ULL, 0x0060600000000000ULL,
409 	0x0020600000000000ULL, 0x0060600000000000ULL,
410 	0x0020600000000000ULL, 0x0060600000000000ULL,
411 	0x0020600000000000ULL, 0x0060600000000000ULL,
412 	0x0020600000000000ULL, 0x0000600000000000ULL,
413 	0x0040600000000000ULL, 0x0060600000000000ULL,
414 	END_SIGN
415 };
416 
417 MODULE_LICENSE("GPL");
418 MODULE_VERSION(DRV_VERSION);
419 
420 
421 /* Module Loadable parameters. */
422 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
423 S2IO_PARM_INT(rx_ring_num, 1);
424 S2IO_PARM_INT(multiq, 0);
425 S2IO_PARM_INT(rx_ring_mode, 1);
426 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
427 S2IO_PARM_INT(rmac_pause_time, 0x100);
428 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
429 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
430 S2IO_PARM_INT(shared_splits, 0);
431 S2IO_PARM_INT(tmac_util_period, 5);
432 S2IO_PARM_INT(rmac_util_period, 5);
433 S2IO_PARM_INT(l3l4hdr_size, 128);
434 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
435 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
436 /* Frequency of Rx desc syncs expressed as power of 2 */
437 S2IO_PARM_INT(rxsync_frequency, 3);
438 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
439 S2IO_PARM_INT(intr_type, 2);
440 /* Large receive offload feature */
441 
442 /* Max pkts to be aggregated by LRO at one time. If not specified,
443  * aggregation happens until we hit max IP pkt size(64K)
444  */
445 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
446 S2IO_PARM_INT(indicate_max_pkts, 0);
447 
448 S2IO_PARM_INT(napi, 1);
449 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
450 
451 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
452 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
453 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
454 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
455 static unsigned int rts_frm_len[MAX_RX_RINGS] =
456 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
457 
458 module_param_array(tx_fifo_len, uint, NULL, 0);
459 module_param_array(rx_ring_sz, uint, NULL, 0);
460 module_param_array(rts_frm_len, uint, NULL, 0);
461 
462 /*
463  * S2IO device table.
464  * This table lists all the devices that this driver supports.
465  */
466 static const struct pci_device_id s2io_tbl[] = {
467 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
468 	 PCI_ANY_ID, PCI_ANY_ID},
469 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
470 	 PCI_ANY_ID, PCI_ANY_ID},
471 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
472 	 PCI_ANY_ID, PCI_ANY_ID},
473 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
474 	 PCI_ANY_ID, PCI_ANY_ID},
475 	{0,}
476 };
477 
478 MODULE_DEVICE_TABLE(pci, s2io_tbl);
479 
480 static const struct pci_error_handlers s2io_err_handler = {
481 	.error_detected = s2io_io_error_detected,
482 	.slot_reset = s2io_io_slot_reset,
483 	.resume = s2io_io_resume,
484 };
485 
486 static struct pci_driver s2io_driver = {
487 	.name = "S2IO",
488 	.id_table = s2io_tbl,
489 	.probe = s2io_init_nic,
490 	.remove = s2io_rem_nic,
491 	.err_handler = &s2io_err_handler,
492 };
493 
494 /* A simplifier macro used both by init and free shared_mem Fns(). */
495 #define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
496 
497 /* netqueue manipulation helper functions */
498 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
499 {
500 	if (!sp->config.multiq) {
501 		int i;
502 
503 		for (i = 0; i < sp->config.tx_fifo_num; i++)
504 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
505 	}
506 	netif_tx_stop_all_queues(sp->dev);
507 }
508 
509 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
510 {
511 	if (!sp->config.multiq)
512 		sp->mac_control.fifos[fifo_no].queue_state =
513 			FIFO_QUEUE_STOP;
514 
515 	netif_tx_stop_all_queues(sp->dev);
516 }
517 
518 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
519 {
520 	if (!sp->config.multiq) {
521 		int i;
522 
523 		for (i = 0; i < sp->config.tx_fifo_num; i++)
524 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
525 	}
526 	netif_tx_start_all_queues(sp->dev);
527 }
528 
529 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
530 {
531 	if (!sp->config.multiq) {
532 		int i;
533 
534 		for (i = 0; i < sp->config.tx_fifo_num; i++)
535 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
536 	}
537 	netif_tx_wake_all_queues(sp->dev);
538 }
539 
540 static inline void s2io_wake_tx_queue(
541 	struct fifo_info *fifo, int cnt, u8 multiq)
542 {
543 
544 	if (multiq) {
545 		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
546 			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
547 	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
548 		if (netif_queue_stopped(fifo->dev)) {
549 			fifo->queue_state = FIFO_QUEUE_START;
550 			netif_wake_queue(fifo->dev);
551 		}
552 	}
553 }
554 
555 /**
556  * init_shared_mem - Allocation and Initialization of Memory
557  * @nic: Device private variable.
558  * Description: The function allocates all the memory areas shared
559  * between the NIC and the driver. This includes Tx descriptors,
560  * Rx descriptors and the statistics block.
561  */
562 
563 static int init_shared_mem(struct s2io_nic *nic)
564 {
565 	u32 size;
566 	void *tmp_v_addr, *tmp_v_addr_next;
567 	dma_addr_t tmp_p_addr, tmp_p_addr_next;
568 	struct RxD_block *pre_rxd_blk = NULL;
569 	int i, j, blk_cnt;
570 	int lst_size, lst_per_page;
571 	struct net_device *dev = nic->dev;
572 	unsigned long tmp;
573 	struct buffAdd *ba;
574 	struct config_param *config = &nic->config;
575 	struct mac_info *mac_control = &nic->mac_control;
576 	unsigned long long mem_allocated = 0;
577 
578 	/* Allocation and initialization of TXDLs in FIFOs */
579 	size = 0;
580 	for (i = 0; i < config->tx_fifo_num; i++) {
581 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
582 
583 		size += tx_cfg->fifo_len;
584 	}
585 	if (size > MAX_AVAILABLE_TXDS) {
586 		DBG_PRINT(ERR_DBG,
587 			  "Too many TxDs requested: %d, max supported: %d\n",
588 			  size, MAX_AVAILABLE_TXDS);
589 		return -EINVAL;
590 	}
591 
592 	size = 0;
593 	for (i = 0; i < config->tx_fifo_num; i++) {
594 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
595 
596 		size = tx_cfg->fifo_len;
597 		/*
598 		 * Legal values are from 2 to 8192
599 		 */
600 		if (size < 2) {
601 			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
602 				  "Valid lengths are 2 through 8192\n",
603 				  i, size);
604 			return -EINVAL;
605 		}
606 	}
607 
608 	lst_size = (sizeof(struct TxD) * config->max_txds);
609 	lst_per_page = PAGE_SIZE / lst_size;
610 
611 	for (i = 0; i < config->tx_fifo_num; i++) {
612 		struct fifo_info *fifo = &mac_control->fifos[i];
613 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
614 		int fifo_len = tx_cfg->fifo_len;
615 		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
616 
617 		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
618 		if (!fifo->list_info) {
619 			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
620 			return -ENOMEM;
621 		}
622 		mem_allocated += list_holder_size;
623 	}
624 	for (i = 0; i < config->tx_fifo_num; i++) {
625 		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
626 						lst_per_page);
627 		struct fifo_info *fifo = &mac_control->fifos[i];
628 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
629 
630 		fifo->tx_curr_put_info.offset = 0;
631 		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
632 		fifo->tx_curr_get_info.offset = 0;
633 		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
634 		fifo->fifo_no = i;
635 		fifo->nic = nic;
636 		fifo->max_txds = MAX_SKB_FRAGS + 2;
637 		fifo->dev = dev;
638 
639 		for (j = 0; j < page_num; j++) {
640 			int k = 0;
641 			dma_addr_t tmp_p;
642 			void *tmp_v;
643 			tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE,
644 						   &tmp_p, GFP_KERNEL);
645 			if (!tmp_v) {
646 				DBG_PRINT(INFO_DBG,
647 					  "dma_alloc_coherent failed for TxDL\n");
648 				return -ENOMEM;
649 			}
650 			/* If we got a zero DMA address(can happen on
651 			 * certain platforms like PPC), reallocate.
652 			 * Store virtual address of page we don't want,
653 			 * to be freed later.
654 			 */
655 			if (!tmp_p) {
656 				mac_control->zerodma_virt_addr = tmp_v;
657 				DBG_PRINT(INIT_DBG,
658 					  "%s: Zero DMA address for TxDL. "
659 					  "Virtual address %p\n",
660 					  dev->name, tmp_v);
661 				tmp_v = dma_alloc_coherent(&nic->pdev->dev,
662 							   PAGE_SIZE, &tmp_p,
663 							   GFP_KERNEL);
664 				if (!tmp_v) {
665 					DBG_PRINT(INFO_DBG,
666 						  "dma_alloc_coherent failed for TxDL\n");
667 					return -ENOMEM;
668 				}
669 				mem_allocated += PAGE_SIZE;
670 			}
671 			while (k < lst_per_page) {
672 				int l = (j * lst_per_page) + k;
673 				if (l == tx_cfg->fifo_len)
674 					break;
675 				fifo->list_info[l].list_virt_addr =
676 					tmp_v + (k * lst_size);
677 				fifo->list_info[l].list_phy_addr =
678 					tmp_p + (k * lst_size);
679 				k++;
680 			}
681 		}
682 	}
683 
684 	for (i = 0; i < config->tx_fifo_num; i++) {
685 		struct fifo_info *fifo = &mac_control->fifos[i];
686 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
687 
688 		size = tx_cfg->fifo_len;
689 		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
690 		if (!fifo->ufo_in_band_v)
691 			return -ENOMEM;
692 		mem_allocated += (size * sizeof(u64));
693 	}
694 
695 	/* Allocation and initialization of RXDs in Rings */
696 	size = 0;
697 	for (i = 0; i < config->rx_ring_num; i++) {
698 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
699 		struct ring_info *ring = &mac_control->rings[i];
700 
701 		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
702 			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
703 				  "multiple of RxDs per Block\n",
704 				  dev->name, i);
705 			return FAILURE;
706 		}
707 		size += rx_cfg->num_rxd;
708 		ring->block_count = rx_cfg->num_rxd /
709 			(rxd_count[nic->rxd_mode] + 1);
710 		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
711 	}
712 	if (nic->rxd_mode == RXD_MODE_1)
713 		size = (size * (sizeof(struct RxD1)));
714 	else
715 		size = (size * (sizeof(struct RxD3)));
716 
717 	for (i = 0; i < config->rx_ring_num; i++) {
718 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
719 		struct ring_info *ring = &mac_control->rings[i];
720 
721 		ring->rx_curr_get_info.block_index = 0;
722 		ring->rx_curr_get_info.offset = 0;
723 		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
724 		ring->rx_curr_put_info.block_index = 0;
725 		ring->rx_curr_put_info.offset = 0;
726 		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
727 		ring->nic = nic;
728 		ring->ring_no = i;
729 
730 		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
731 		/*  Allocating all the Rx blocks */
732 		for (j = 0; j < blk_cnt; j++) {
733 			struct rx_block_info *rx_blocks;
734 			int l;
735 
736 			rx_blocks = &ring->rx_blocks[j];
737 			size = SIZE_OF_BLOCK;	/* size is always page size */
738 			tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size,
739 							&tmp_p_addr, GFP_KERNEL);
740 			if (tmp_v_addr == NULL) {
741 				/*
742 				 * In case of failure, free_shared_mem()
743 				 * is called, which should free any
744 				 * memory that was alloced till the
745 				 * failure happened.
746 				 */
747 				rx_blocks->block_virt_addr = tmp_v_addr;
748 				return -ENOMEM;
749 			}
750 			mem_allocated += size;
751 
752 			size = sizeof(struct rxd_info) *
753 				rxd_count[nic->rxd_mode];
754 			rx_blocks->block_virt_addr = tmp_v_addr;
755 			rx_blocks->block_dma_addr = tmp_p_addr;
756 			rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
757 			if (!rx_blocks->rxds)
758 				return -ENOMEM;
759 			mem_allocated += size;
760 			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
761 				rx_blocks->rxds[l].virt_addr =
762 					rx_blocks->block_virt_addr +
763 					(rxd_size[nic->rxd_mode] * l);
764 				rx_blocks->rxds[l].dma_addr =
765 					rx_blocks->block_dma_addr +
766 					(rxd_size[nic->rxd_mode] * l);
767 			}
768 		}
769 		/* Interlinking all Rx Blocks */
770 		for (j = 0; j < blk_cnt; j++) {
771 			int next = (j + 1) % blk_cnt;
772 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
773 			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
774 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
775 			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
776 
777 			pre_rxd_blk = tmp_v_addr;
778 			pre_rxd_blk->reserved_2_pNext_RxD_block =
779 				(unsigned long)tmp_v_addr_next;
780 			pre_rxd_blk->pNext_RxD_Blk_physical =
781 				(u64)tmp_p_addr_next;
782 		}
783 	}
784 	if (nic->rxd_mode == RXD_MODE_3B) {
785 		/*
786 		 * Allocation of Storages for buffer addresses in 2BUFF mode
787 		 * and the buffers as well.
788 		 */
789 		for (i = 0; i < config->rx_ring_num; i++) {
790 			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
791 			struct ring_info *ring = &mac_control->rings[i];
792 
793 			blk_cnt = rx_cfg->num_rxd /
794 				(rxd_count[nic->rxd_mode] + 1);
795 			size = sizeof(struct buffAdd *) * blk_cnt;
796 			ring->ba = kmalloc(size, GFP_KERNEL);
797 			if (!ring->ba)
798 				return -ENOMEM;
799 			mem_allocated += size;
800 			for (j = 0; j < blk_cnt; j++) {
801 				int k = 0;
802 
803 				size = sizeof(struct buffAdd) *
804 					(rxd_count[nic->rxd_mode] + 1);
805 				ring->ba[j] = kmalloc(size, GFP_KERNEL);
806 				if (!ring->ba[j])
807 					return -ENOMEM;
808 				mem_allocated += size;
809 				while (k != rxd_count[nic->rxd_mode]) {
810 					ba = &ring->ba[j][k];
811 					size = BUF0_LEN + ALIGN_SIZE;
812 					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
813 					if (!ba->ba_0_org)
814 						return -ENOMEM;
815 					mem_allocated += size;
816 					tmp = (unsigned long)ba->ba_0_org;
817 					tmp += ALIGN_SIZE;
818 					tmp &= ~((unsigned long)ALIGN_SIZE);
819 					ba->ba_0 = (void *)tmp;
820 
821 					size = BUF1_LEN + ALIGN_SIZE;
822 					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
823 					if (!ba->ba_1_org)
824 						return -ENOMEM;
825 					mem_allocated += size;
826 					tmp = (unsigned long)ba->ba_1_org;
827 					tmp += ALIGN_SIZE;
828 					tmp &= ~((unsigned long)ALIGN_SIZE);
829 					ba->ba_1 = (void *)tmp;
830 					k++;
831 				}
832 			}
833 		}
834 	}
835 
836 	/* Allocation and initialization of Statistics block */
837 	size = sizeof(struct stat_block);
838 	mac_control->stats_mem =
839 		dma_alloc_coherent(&nic->pdev->dev, size,
840 				   &mac_control->stats_mem_phy, GFP_KERNEL);
841 
842 	if (!mac_control->stats_mem) {
843 		/*
844 		 * In case of failure, free_shared_mem() is called, which
845 		 * should free any memory that was alloced till the
846 		 * failure happened.
847 		 */
848 		return -ENOMEM;
849 	}
850 	mem_allocated += size;
851 	mac_control->stats_mem_sz = size;
852 
853 	tmp_v_addr = mac_control->stats_mem;
854 	mac_control->stats_info = tmp_v_addr;
855 	memset(tmp_v_addr, 0, size);
856 	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
857 		dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
858 	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
859 	return SUCCESS;
860 }
861 
862 /**
863  * free_shared_mem - Free the allocated Memory
864  * @nic:  Device private variable.
865  * Description: This function is to free all memory locations allocated by
866  * the init_shared_mem() function and return it to the kernel.
867  */
868 
869 static void free_shared_mem(struct s2io_nic *nic)
870 {
871 	int i, j, blk_cnt, size;
872 	void *tmp_v_addr;
873 	dma_addr_t tmp_p_addr;
874 	int lst_size, lst_per_page;
875 	struct net_device *dev;
876 	int page_num = 0;
877 	struct config_param *config;
878 	struct mac_info *mac_control;
879 	struct stat_block *stats;
880 	struct swStat *swstats;
881 
882 	if (!nic)
883 		return;
884 
885 	dev = nic->dev;
886 
887 	config = &nic->config;
888 	mac_control = &nic->mac_control;
889 	stats = mac_control->stats_info;
890 	swstats = &stats->sw_stat;
891 
892 	lst_size = sizeof(struct TxD) * config->max_txds;
893 	lst_per_page = PAGE_SIZE / lst_size;
894 
895 	for (i = 0; i < config->tx_fifo_num; i++) {
896 		struct fifo_info *fifo = &mac_control->fifos[i];
897 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
898 
899 		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
900 		for (j = 0; j < page_num; j++) {
901 			int mem_blks = (j * lst_per_page);
902 			struct list_info_hold *fli;
903 
904 			if (!fifo->list_info)
905 				return;
906 
907 			fli = &fifo->list_info[mem_blks];
908 			if (!fli->list_virt_addr)
909 				break;
910 			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
911 					  fli->list_virt_addr,
912 					  fli->list_phy_addr);
913 			swstats->mem_freed += PAGE_SIZE;
914 		}
915 		/* If we got a zero DMA address during allocation,
916 		 * free the page now
917 		 */
918 		if (mac_control->zerodma_virt_addr) {
919 			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
920 					  mac_control->zerodma_virt_addr,
921 					  (dma_addr_t)0);
922 			DBG_PRINT(INIT_DBG,
923 				  "%s: Freeing TxDL with zero DMA address. "
924 				  "Virtual address %p\n",
925 				  dev->name, mac_control->zerodma_virt_addr);
926 			swstats->mem_freed += PAGE_SIZE;
927 		}
928 		kfree(fifo->list_info);
929 		swstats->mem_freed += tx_cfg->fifo_len *
930 			sizeof(struct list_info_hold);
931 	}
932 
933 	size = SIZE_OF_BLOCK;
934 	for (i = 0; i < config->rx_ring_num; i++) {
935 		struct ring_info *ring = &mac_control->rings[i];
936 
937 		blk_cnt = ring->block_count;
938 		for (j = 0; j < blk_cnt; j++) {
939 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
940 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
941 			if (tmp_v_addr == NULL)
942 				break;
943 			dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr,
944 					  tmp_p_addr);
945 			swstats->mem_freed += size;
946 			kfree(ring->rx_blocks[j].rxds);
947 			swstats->mem_freed += sizeof(struct rxd_info) *
948 				rxd_count[nic->rxd_mode];
949 		}
950 	}
951 
952 	if (nic->rxd_mode == RXD_MODE_3B) {
953 		/* Freeing buffer storage addresses in 2BUFF mode. */
954 		for (i = 0; i < config->rx_ring_num; i++) {
955 			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
956 			struct ring_info *ring = &mac_control->rings[i];
957 
958 			blk_cnt = rx_cfg->num_rxd /
959 				(rxd_count[nic->rxd_mode] + 1);
960 			for (j = 0; j < blk_cnt; j++) {
961 				int k = 0;
962 				if (!ring->ba[j])
963 					continue;
964 				while (k != rxd_count[nic->rxd_mode]) {
965 					struct buffAdd *ba = &ring->ba[j][k];
966 					kfree(ba->ba_0_org);
967 					swstats->mem_freed +=
968 						BUF0_LEN + ALIGN_SIZE;
969 					kfree(ba->ba_1_org);
970 					swstats->mem_freed +=
971 						BUF1_LEN + ALIGN_SIZE;
972 					k++;
973 				}
974 				kfree(ring->ba[j]);
975 				swstats->mem_freed += sizeof(struct buffAdd) *
976 					(rxd_count[nic->rxd_mode] + 1);
977 			}
978 			kfree(ring->ba);
979 			swstats->mem_freed += sizeof(struct buffAdd *) *
980 				blk_cnt;
981 		}
982 	}
983 
984 	for (i = 0; i < nic->config.tx_fifo_num; i++) {
985 		struct fifo_info *fifo = &mac_control->fifos[i];
986 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
987 
988 		if (fifo->ufo_in_band_v) {
989 			swstats->mem_freed += tx_cfg->fifo_len *
990 				sizeof(u64);
991 			kfree(fifo->ufo_in_band_v);
992 		}
993 	}
994 
995 	if (mac_control->stats_mem) {
996 		swstats->mem_freed += mac_control->stats_mem_sz;
997 		dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz,
998 				  mac_control->stats_mem,
999 				  mac_control->stats_mem_phy);
1000 	}
1001 }
1002 
1003 /**
1004  * s2io_verify_pci_mode -
1005  */
1006 
1007 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1008 {
1009 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1010 	register u64 val64 = 0;
1011 	int     mode;
1012 
1013 	val64 = readq(&bar0->pci_mode);
1014 	mode = (u8)GET_PCI_MODE(val64);
1015 
1016 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1017 		return -1;      /* Unknown PCI mode */
1018 	return mode;
1019 }
1020 
1021 #define NEC_VENID   0x1033
1022 #define NEC_DEVID   0x0125
1023 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1024 {
1025 	struct pci_dev *tdev = NULL;
1026 	for_each_pci_dev(tdev) {
1027 		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1028 			if (tdev->bus == s2io_pdev->bus->parent) {
1029 				pci_dev_put(tdev);
1030 				return 1;
1031 			}
1032 		}
1033 	}
1034 	return 0;
1035 }
1036 
1037 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1038 /**
1039  * s2io_print_pci_mode -
1040  */
1041 static int s2io_print_pci_mode(struct s2io_nic *nic)
1042 {
1043 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1044 	register u64 val64 = 0;
1045 	int	mode;
1046 	struct config_param *config = &nic->config;
1047 	const char *pcimode;
1048 
1049 	val64 = readq(&bar0->pci_mode);
1050 	mode = (u8)GET_PCI_MODE(val64);
1051 
1052 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1053 		return -1;	/* Unknown PCI mode */
1054 
1055 	config->bus_speed = bus_speed[mode];
1056 
1057 	if (s2io_on_nec_bridge(nic->pdev)) {
1058 		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1059 			  nic->dev->name);
1060 		return mode;
1061 	}
1062 
1063 	switch (mode) {
1064 	case PCI_MODE_PCI_33:
1065 		pcimode = "33MHz PCI bus";
1066 		break;
1067 	case PCI_MODE_PCI_66:
1068 		pcimode = "66MHz PCI bus";
1069 		break;
1070 	case PCI_MODE_PCIX_M1_66:
1071 		pcimode = "66MHz PCIX(M1) bus";
1072 		break;
1073 	case PCI_MODE_PCIX_M1_100:
1074 		pcimode = "100MHz PCIX(M1) bus";
1075 		break;
1076 	case PCI_MODE_PCIX_M1_133:
1077 		pcimode = "133MHz PCIX(M1) bus";
1078 		break;
1079 	case PCI_MODE_PCIX_M2_66:
1080 		pcimode = "133MHz PCIX(M2) bus";
1081 		break;
1082 	case PCI_MODE_PCIX_M2_100:
1083 		pcimode = "200MHz PCIX(M2) bus";
1084 		break;
1085 	case PCI_MODE_PCIX_M2_133:
1086 		pcimode = "266MHz PCIX(M2) bus";
1087 		break;
1088 	default:
1089 		pcimode = "unsupported bus!";
1090 		mode = -1;
1091 	}
1092 
1093 	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1094 		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1095 
1096 	return mode;
1097 }
1098 
1099 /**
1100  *  init_tti - Initialization transmit traffic interrupt scheme
1101  *  @nic: device private variable
1102  *  @link: link status (UP/DOWN) used to enable/disable continuous
1103  *  transmit interrupts
1104  *  Description: The function configures transmit traffic interrupts
1105  *  Return Value:  SUCCESS on success and
1106  *  '-1' on failure
1107  */
1108 
1109 static int init_tti(struct s2io_nic *nic, int link)
1110 {
1111 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1112 	register u64 val64 = 0;
1113 	int i;
1114 	struct config_param *config = &nic->config;
1115 
1116 	for (i = 0; i < config->tx_fifo_num; i++) {
1117 		/*
1118 		 * TTI Initialization. Default Tx timer gets us about
1119 		 * 250 interrupts per sec. Continuous interrupts are enabled
1120 		 * by default.
1121 		 */
1122 		if (nic->device_type == XFRAME_II_DEVICE) {
1123 			int count = (nic->config.bus_speed * 125)/2;
1124 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1125 		} else
1126 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1127 
1128 		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1129 			TTI_DATA1_MEM_TX_URNG_B(0x10) |
1130 			TTI_DATA1_MEM_TX_URNG_C(0x30) |
1131 			TTI_DATA1_MEM_TX_TIMER_AC_EN;
1132 		if (i == 0)
1133 			if (use_continuous_tx_intrs && (link == LINK_UP))
1134 				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1135 		writeq(val64, &bar0->tti_data1_mem);
1136 
1137 		if (nic->config.intr_type == MSI_X) {
1138 			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1139 				TTI_DATA2_MEM_TX_UFC_B(0x100) |
1140 				TTI_DATA2_MEM_TX_UFC_C(0x200) |
1141 				TTI_DATA2_MEM_TX_UFC_D(0x300);
1142 		} else {
1143 			if ((nic->config.tx_steering_type ==
1144 			     TX_DEFAULT_STEERING) &&
1145 			    (config->tx_fifo_num > 1) &&
1146 			    (i >= nic->udp_fifo_idx) &&
1147 			    (i < (nic->udp_fifo_idx +
1148 				  nic->total_udp_fifos)))
1149 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1150 					TTI_DATA2_MEM_TX_UFC_B(0x80) |
1151 					TTI_DATA2_MEM_TX_UFC_C(0x100) |
1152 					TTI_DATA2_MEM_TX_UFC_D(0x120);
1153 			else
1154 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1155 					TTI_DATA2_MEM_TX_UFC_B(0x20) |
1156 					TTI_DATA2_MEM_TX_UFC_C(0x40) |
1157 					TTI_DATA2_MEM_TX_UFC_D(0x80);
1158 		}
1159 
1160 		writeq(val64, &bar0->tti_data2_mem);
1161 
1162 		val64 = TTI_CMD_MEM_WE |
1163 			TTI_CMD_MEM_STROBE_NEW_CMD |
1164 			TTI_CMD_MEM_OFFSET(i);
1165 		writeq(val64, &bar0->tti_command_mem);
1166 
1167 		if (wait_for_cmd_complete(&bar0->tti_command_mem,
1168 					  TTI_CMD_MEM_STROBE_NEW_CMD,
1169 					  S2IO_BIT_RESET) != SUCCESS)
1170 			return FAILURE;
1171 	}
1172 
1173 	return SUCCESS;
1174 }
1175 
1176 /**
1177  *  init_nic - Initialization of hardware
1178  *  @nic: device private variable
1179  *  Description: The function sequentially configures every block
1180  *  of the H/W from their reset values.
1181  *  Return Value:  SUCCESS on success and
1182  *  '-1' on failure (endian settings incorrect).
1183  */
1184 
1185 static int init_nic(struct s2io_nic *nic)
1186 {
1187 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1188 	struct net_device *dev = nic->dev;
1189 	register u64 val64 = 0;
1190 	void __iomem *add;
1191 	u32 time;
1192 	int i, j;
1193 	int dtx_cnt = 0;
1194 	unsigned long long mem_share;
1195 	int mem_size;
1196 	struct config_param *config = &nic->config;
1197 	struct mac_info *mac_control = &nic->mac_control;
1198 
1199 	/* to set the swapper controle on the card */
1200 	if (s2io_set_swapper(nic)) {
1201 		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1202 		return -EIO;
1203 	}
1204 
1205 	/*
1206 	 * Herc requires EOI to be removed from reset before XGXS, so..
1207 	 */
1208 	if (nic->device_type & XFRAME_II_DEVICE) {
1209 		val64 = 0xA500000000ULL;
1210 		writeq(val64, &bar0->sw_reset);
1211 		msleep(500);
1212 		val64 = readq(&bar0->sw_reset);
1213 	}
1214 
1215 	/* Remove XGXS from reset state */
1216 	val64 = 0;
1217 	writeq(val64, &bar0->sw_reset);
1218 	msleep(500);
1219 	val64 = readq(&bar0->sw_reset);
1220 
1221 	/* Ensure that it's safe to access registers by checking
1222 	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1223 	 */
1224 	if (nic->device_type == XFRAME_II_DEVICE) {
1225 		for (i = 0; i < 50; i++) {
1226 			val64 = readq(&bar0->adapter_status);
1227 			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1228 				break;
1229 			msleep(10);
1230 		}
1231 		if (i == 50)
1232 			return -ENODEV;
1233 	}
1234 
1235 	/*  Enable Receiving broadcasts */
1236 	add = &bar0->mac_cfg;
1237 	val64 = readq(&bar0->mac_cfg);
1238 	val64 |= MAC_RMAC_BCAST_ENABLE;
1239 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1240 	writel((u32)val64, add);
1241 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1242 	writel((u32) (val64 >> 32), (add + 4));
1243 
1244 	/* Read registers in all blocks */
1245 	val64 = readq(&bar0->mac_int_mask);
1246 	val64 = readq(&bar0->mc_int_mask);
1247 	val64 = readq(&bar0->xgxs_int_mask);
1248 
1249 	/*  Set MTU */
1250 	val64 = dev->mtu;
1251 	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1252 
1253 	if (nic->device_type & XFRAME_II_DEVICE) {
1254 		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1255 			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1256 					  &bar0->dtx_control, UF);
1257 			if (dtx_cnt & 0x1)
1258 				msleep(1); /* Necessary!! */
1259 			dtx_cnt++;
1260 		}
1261 	} else {
1262 		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1263 			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1264 					  &bar0->dtx_control, UF);
1265 			val64 = readq(&bar0->dtx_control);
1266 			dtx_cnt++;
1267 		}
1268 	}
1269 
1270 	/*  Tx DMA Initialization */
1271 	val64 = 0;
1272 	writeq(val64, &bar0->tx_fifo_partition_0);
1273 	writeq(val64, &bar0->tx_fifo_partition_1);
1274 	writeq(val64, &bar0->tx_fifo_partition_2);
1275 	writeq(val64, &bar0->tx_fifo_partition_3);
1276 
1277 	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1278 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1279 
1280 		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1281 			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1282 
1283 		if (i == (config->tx_fifo_num - 1)) {
1284 			if (i % 2 == 0)
1285 				i++;
1286 		}
1287 
1288 		switch (i) {
1289 		case 1:
1290 			writeq(val64, &bar0->tx_fifo_partition_0);
1291 			val64 = 0;
1292 			j = 0;
1293 			break;
1294 		case 3:
1295 			writeq(val64, &bar0->tx_fifo_partition_1);
1296 			val64 = 0;
1297 			j = 0;
1298 			break;
1299 		case 5:
1300 			writeq(val64, &bar0->tx_fifo_partition_2);
1301 			val64 = 0;
1302 			j = 0;
1303 			break;
1304 		case 7:
1305 			writeq(val64, &bar0->tx_fifo_partition_3);
1306 			val64 = 0;
1307 			j = 0;
1308 			break;
1309 		default:
1310 			j++;
1311 			break;
1312 		}
1313 	}
1314 
1315 	/*
1316 	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1317 	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1318 	 */
1319 	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1320 		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1321 
1322 	val64 = readq(&bar0->tx_fifo_partition_0);
1323 	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1324 		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1325 
1326 	/*
1327 	 * Initialization of Tx_PA_CONFIG register to ignore packet
1328 	 * integrity checking.
1329 	 */
1330 	val64 = readq(&bar0->tx_pa_cfg);
1331 	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1332 		TX_PA_CFG_IGNORE_SNAP_OUI |
1333 		TX_PA_CFG_IGNORE_LLC_CTRL |
1334 		TX_PA_CFG_IGNORE_L2_ERR;
1335 	writeq(val64, &bar0->tx_pa_cfg);
1336 
1337 	/* Rx DMA initialization. */
1338 	val64 = 0;
1339 	for (i = 0; i < config->rx_ring_num; i++) {
1340 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1341 
1342 		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1343 	}
1344 	writeq(val64, &bar0->rx_queue_priority);
1345 
1346 	/*
1347 	 * Allocating equal share of memory to all the
1348 	 * configured Rings.
1349 	 */
1350 	val64 = 0;
1351 	if (nic->device_type & XFRAME_II_DEVICE)
1352 		mem_size = 32;
1353 	else
1354 		mem_size = 64;
1355 
1356 	for (i = 0; i < config->rx_ring_num; i++) {
1357 		switch (i) {
1358 		case 0:
1359 			mem_share = (mem_size / config->rx_ring_num +
1360 				     mem_size % config->rx_ring_num);
1361 			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1362 			continue;
1363 		case 1:
1364 			mem_share = (mem_size / config->rx_ring_num);
1365 			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1366 			continue;
1367 		case 2:
1368 			mem_share = (mem_size / config->rx_ring_num);
1369 			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1370 			continue;
1371 		case 3:
1372 			mem_share = (mem_size / config->rx_ring_num);
1373 			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1374 			continue;
1375 		case 4:
1376 			mem_share = (mem_size / config->rx_ring_num);
1377 			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1378 			continue;
1379 		case 5:
1380 			mem_share = (mem_size / config->rx_ring_num);
1381 			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1382 			continue;
1383 		case 6:
1384 			mem_share = (mem_size / config->rx_ring_num);
1385 			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1386 			continue;
1387 		case 7:
1388 			mem_share = (mem_size / config->rx_ring_num);
1389 			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1390 			continue;
1391 		}
1392 	}
1393 	writeq(val64, &bar0->rx_queue_cfg);
1394 
1395 	/*
1396 	 * Filling Tx round robin registers
1397 	 * as per the number of FIFOs for equal scheduling priority
1398 	 */
1399 	switch (config->tx_fifo_num) {
1400 	case 1:
1401 		val64 = 0x0;
1402 		writeq(val64, &bar0->tx_w_round_robin_0);
1403 		writeq(val64, &bar0->tx_w_round_robin_1);
1404 		writeq(val64, &bar0->tx_w_round_robin_2);
1405 		writeq(val64, &bar0->tx_w_round_robin_3);
1406 		writeq(val64, &bar0->tx_w_round_robin_4);
1407 		break;
1408 	case 2:
1409 		val64 = 0x0001000100010001ULL;
1410 		writeq(val64, &bar0->tx_w_round_robin_0);
1411 		writeq(val64, &bar0->tx_w_round_robin_1);
1412 		writeq(val64, &bar0->tx_w_round_robin_2);
1413 		writeq(val64, &bar0->tx_w_round_robin_3);
1414 		val64 = 0x0001000100000000ULL;
1415 		writeq(val64, &bar0->tx_w_round_robin_4);
1416 		break;
1417 	case 3:
1418 		val64 = 0x0001020001020001ULL;
1419 		writeq(val64, &bar0->tx_w_round_robin_0);
1420 		val64 = 0x0200010200010200ULL;
1421 		writeq(val64, &bar0->tx_w_round_robin_1);
1422 		val64 = 0x0102000102000102ULL;
1423 		writeq(val64, &bar0->tx_w_round_robin_2);
1424 		val64 = 0x0001020001020001ULL;
1425 		writeq(val64, &bar0->tx_w_round_robin_3);
1426 		val64 = 0x0200010200000000ULL;
1427 		writeq(val64, &bar0->tx_w_round_robin_4);
1428 		break;
1429 	case 4:
1430 		val64 = 0x0001020300010203ULL;
1431 		writeq(val64, &bar0->tx_w_round_robin_0);
1432 		writeq(val64, &bar0->tx_w_round_robin_1);
1433 		writeq(val64, &bar0->tx_w_round_robin_2);
1434 		writeq(val64, &bar0->tx_w_round_robin_3);
1435 		val64 = 0x0001020300000000ULL;
1436 		writeq(val64, &bar0->tx_w_round_robin_4);
1437 		break;
1438 	case 5:
1439 		val64 = 0x0001020304000102ULL;
1440 		writeq(val64, &bar0->tx_w_round_robin_0);
1441 		val64 = 0x0304000102030400ULL;
1442 		writeq(val64, &bar0->tx_w_round_robin_1);
1443 		val64 = 0x0102030400010203ULL;
1444 		writeq(val64, &bar0->tx_w_round_robin_2);
1445 		val64 = 0x0400010203040001ULL;
1446 		writeq(val64, &bar0->tx_w_round_robin_3);
1447 		val64 = 0x0203040000000000ULL;
1448 		writeq(val64, &bar0->tx_w_round_robin_4);
1449 		break;
1450 	case 6:
1451 		val64 = 0x0001020304050001ULL;
1452 		writeq(val64, &bar0->tx_w_round_robin_0);
1453 		val64 = 0x0203040500010203ULL;
1454 		writeq(val64, &bar0->tx_w_round_robin_1);
1455 		val64 = 0x0405000102030405ULL;
1456 		writeq(val64, &bar0->tx_w_round_robin_2);
1457 		val64 = 0x0001020304050001ULL;
1458 		writeq(val64, &bar0->tx_w_round_robin_3);
1459 		val64 = 0x0203040500000000ULL;
1460 		writeq(val64, &bar0->tx_w_round_robin_4);
1461 		break;
1462 	case 7:
1463 		val64 = 0x0001020304050600ULL;
1464 		writeq(val64, &bar0->tx_w_round_robin_0);
1465 		val64 = 0x0102030405060001ULL;
1466 		writeq(val64, &bar0->tx_w_round_robin_1);
1467 		val64 = 0x0203040506000102ULL;
1468 		writeq(val64, &bar0->tx_w_round_robin_2);
1469 		val64 = 0x0304050600010203ULL;
1470 		writeq(val64, &bar0->tx_w_round_robin_3);
1471 		val64 = 0x0405060000000000ULL;
1472 		writeq(val64, &bar0->tx_w_round_robin_4);
1473 		break;
1474 	case 8:
1475 		val64 = 0x0001020304050607ULL;
1476 		writeq(val64, &bar0->tx_w_round_robin_0);
1477 		writeq(val64, &bar0->tx_w_round_robin_1);
1478 		writeq(val64, &bar0->tx_w_round_robin_2);
1479 		writeq(val64, &bar0->tx_w_round_robin_3);
1480 		val64 = 0x0001020300000000ULL;
1481 		writeq(val64, &bar0->tx_w_round_robin_4);
1482 		break;
1483 	}
1484 
1485 	/* Enable all configured Tx FIFO partitions */
1486 	val64 = readq(&bar0->tx_fifo_partition_0);
1487 	val64 |= (TX_FIFO_PARTITION_EN);
1488 	writeq(val64, &bar0->tx_fifo_partition_0);
1489 
1490 	/* Filling the Rx round robin registers as per the
1491 	 * number of Rings and steering based on QoS with
1492 	 * equal priority.
1493 	 */
1494 	switch (config->rx_ring_num) {
1495 	case 1:
1496 		val64 = 0x0;
1497 		writeq(val64, &bar0->rx_w_round_robin_0);
1498 		writeq(val64, &bar0->rx_w_round_robin_1);
1499 		writeq(val64, &bar0->rx_w_round_robin_2);
1500 		writeq(val64, &bar0->rx_w_round_robin_3);
1501 		writeq(val64, &bar0->rx_w_round_robin_4);
1502 
1503 		val64 = 0x8080808080808080ULL;
1504 		writeq(val64, &bar0->rts_qos_steering);
1505 		break;
1506 	case 2:
1507 		val64 = 0x0001000100010001ULL;
1508 		writeq(val64, &bar0->rx_w_round_robin_0);
1509 		writeq(val64, &bar0->rx_w_round_robin_1);
1510 		writeq(val64, &bar0->rx_w_round_robin_2);
1511 		writeq(val64, &bar0->rx_w_round_robin_3);
1512 		val64 = 0x0001000100000000ULL;
1513 		writeq(val64, &bar0->rx_w_round_robin_4);
1514 
1515 		val64 = 0x8080808040404040ULL;
1516 		writeq(val64, &bar0->rts_qos_steering);
1517 		break;
1518 	case 3:
1519 		val64 = 0x0001020001020001ULL;
1520 		writeq(val64, &bar0->rx_w_round_robin_0);
1521 		val64 = 0x0200010200010200ULL;
1522 		writeq(val64, &bar0->rx_w_round_robin_1);
1523 		val64 = 0x0102000102000102ULL;
1524 		writeq(val64, &bar0->rx_w_round_robin_2);
1525 		val64 = 0x0001020001020001ULL;
1526 		writeq(val64, &bar0->rx_w_round_robin_3);
1527 		val64 = 0x0200010200000000ULL;
1528 		writeq(val64, &bar0->rx_w_round_robin_4);
1529 
1530 		val64 = 0x8080804040402020ULL;
1531 		writeq(val64, &bar0->rts_qos_steering);
1532 		break;
1533 	case 4:
1534 		val64 = 0x0001020300010203ULL;
1535 		writeq(val64, &bar0->rx_w_round_robin_0);
1536 		writeq(val64, &bar0->rx_w_round_robin_1);
1537 		writeq(val64, &bar0->rx_w_round_robin_2);
1538 		writeq(val64, &bar0->rx_w_round_robin_3);
1539 		val64 = 0x0001020300000000ULL;
1540 		writeq(val64, &bar0->rx_w_round_robin_4);
1541 
1542 		val64 = 0x8080404020201010ULL;
1543 		writeq(val64, &bar0->rts_qos_steering);
1544 		break;
1545 	case 5:
1546 		val64 = 0x0001020304000102ULL;
1547 		writeq(val64, &bar0->rx_w_round_robin_0);
1548 		val64 = 0x0304000102030400ULL;
1549 		writeq(val64, &bar0->rx_w_round_robin_1);
1550 		val64 = 0x0102030400010203ULL;
1551 		writeq(val64, &bar0->rx_w_round_robin_2);
1552 		val64 = 0x0400010203040001ULL;
1553 		writeq(val64, &bar0->rx_w_round_robin_3);
1554 		val64 = 0x0203040000000000ULL;
1555 		writeq(val64, &bar0->rx_w_round_robin_4);
1556 
1557 		val64 = 0x8080404020201008ULL;
1558 		writeq(val64, &bar0->rts_qos_steering);
1559 		break;
1560 	case 6:
1561 		val64 = 0x0001020304050001ULL;
1562 		writeq(val64, &bar0->rx_w_round_robin_0);
1563 		val64 = 0x0203040500010203ULL;
1564 		writeq(val64, &bar0->rx_w_round_robin_1);
1565 		val64 = 0x0405000102030405ULL;
1566 		writeq(val64, &bar0->rx_w_round_robin_2);
1567 		val64 = 0x0001020304050001ULL;
1568 		writeq(val64, &bar0->rx_w_round_robin_3);
1569 		val64 = 0x0203040500000000ULL;
1570 		writeq(val64, &bar0->rx_w_round_robin_4);
1571 
1572 		val64 = 0x8080404020100804ULL;
1573 		writeq(val64, &bar0->rts_qos_steering);
1574 		break;
1575 	case 7:
1576 		val64 = 0x0001020304050600ULL;
1577 		writeq(val64, &bar0->rx_w_round_robin_0);
1578 		val64 = 0x0102030405060001ULL;
1579 		writeq(val64, &bar0->rx_w_round_robin_1);
1580 		val64 = 0x0203040506000102ULL;
1581 		writeq(val64, &bar0->rx_w_round_robin_2);
1582 		val64 = 0x0304050600010203ULL;
1583 		writeq(val64, &bar0->rx_w_round_robin_3);
1584 		val64 = 0x0405060000000000ULL;
1585 		writeq(val64, &bar0->rx_w_round_robin_4);
1586 
1587 		val64 = 0x8080402010080402ULL;
1588 		writeq(val64, &bar0->rts_qos_steering);
1589 		break;
1590 	case 8:
1591 		val64 = 0x0001020304050607ULL;
1592 		writeq(val64, &bar0->rx_w_round_robin_0);
1593 		writeq(val64, &bar0->rx_w_round_robin_1);
1594 		writeq(val64, &bar0->rx_w_round_robin_2);
1595 		writeq(val64, &bar0->rx_w_round_robin_3);
1596 		val64 = 0x0001020300000000ULL;
1597 		writeq(val64, &bar0->rx_w_round_robin_4);
1598 
1599 		val64 = 0x8040201008040201ULL;
1600 		writeq(val64, &bar0->rts_qos_steering);
1601 		break;
1602 	}
1603 
1604 	/* UDP Fix */
1605 	val64 = 0;
1606 	for (i = 0; i < 8; i++)
1607 		writeq(val64, &bar0->rts_frm_len_n[i]);
1608 
1609 	/* Set the default rts frame length for the rings configured */
1610 	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1611 	for (i = 0 ; i < config->rx_ring_num ; i++)
1612 		writeq(val64, &bar0->rts_frm_len_n[i]);
1613 
1614 	/* Set the frame length for the configured rings
1615 	 * desired by the user
1616 	 */
1617 	for (i = 0; i < config->rx_ring_num; i++) {
1618 		/* If rts_frm_len[i] == 0 then it is assumed that user not
1619 		 * specified frame length steering.
1620 		 * If the user provides the frame length then program
1621 		 * the rts_frm_len register for those values or else
1622 		 * leave it as it is.
1623 		 */
1624 		if (rts_frm_len[i] != 0) {
1625 			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1626 			       &bar0->rts_frm_len_n[i]);
1627 		}
1628 	}
1629 
1630 	/* Disable differentiated services steering logic */
1631 	for (i = 0; i < 64; i++) {
1632 		if (rts_ds_steer(nic, i, 0) == FAILURE) {
1633 			DBG_PRINT(ERR_DBG,
1634 				  "%s: rts_ds_steer failed on codepoint %d\n",
1635 				  dev->name, i);
1636 			return -ENODEV;
1637 		}
1638 	}
1639 
1640 	/* Program statistics memory */
1641 	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1642 
1643 	if (nic->device_type == XFRAME_II_DEVICE) {
1644 		val64 = STAT_BC(0x320);
1645 		writeq(val64, &bar0->stat_byte_cnt);
1646 	}
1647 
1648 	/*
1649 	 * Initializing the sampling rate for the device to calculate the
1650 	 * bandwidth utilization.
1651 	 */
1652 	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1653 		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1654 	writeq(val64, &bar0->mac_link_util);
1655 
1656 	/*
1657 	 * Initializing the Transmit and Receive Traffic Interrupt
1658 	 * Scheme.
1659 	 */
1660 
1661 	/* Initialize TTI */
1662 	if (SUCCESS != init_tti(nic, nic->last_link_state))
1663 		return -ENODEV;
1664 
1665 	/* RTI Initialization */
1666 	if (nic->device_type == XFRAME_II_DEVICE) {
1667 		/*
1668 		 * Programmed to generate Apprx 500 Intrs per
1669 		 * second
1670 		 */
1671 		int count = (nic->config.bus_speed * 125)/4;
1672 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1673 	} else
1674 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1675 	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1676 		RTI_DATA1_MEM_RX_URNG_B(0x10) |
1677 		RTI_DATA1_MEM_RX_URNG_C(0x30) |
1678 		RTI_DATA1_MEM_RX_TIMER_AC_EN;
1679 
1680 	writeq(val64, &bar0->rti_data1_mem);
1681 
1682 	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1683 		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1684 	if (nic->config.intr_type == MSI_X)
1685 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1686 			  RTI_DATA2_MEM_RX_UFC_D(0x40));
1687 	else
1688 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1689 			  RTI_DATA2_MEM_RX_UFC_D(0x80));
1690 	writeq(val64, &bar0->rti_data2_mem);
1691 
1692 	for (i = 0; i < config->rx_ring_num; i++) {
1693 		val64 = RTI_CMD_MEM_WE |
1694 			RTI_CMD_MEM_STROBE_NEW_CMD |
1695 			RTI_CMD_MEM_OFFSET(i);
1696 		writeq(val64, &bar0->rti_command_mem);
1697 
1698 		/*
1699 		 * Once the operation completes, the Strobe bit of the
1700 		 * command register will be reset. We poll for this
1701 		 * particular condition. We wait for a maximum of 500ms
1702 		 * for the operation to complete, if it's not complete
1703 		 * by then we return error.
1704 		 */
1705 		time = 0;
1706 		while (true) {
1707 			val64 = readq(&bar0->rti_command_mem);
1708 			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1709 				break;
1710 
1711 			if (time > 10) {
1712 				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1713 					  dev->name);
1714 				return -ENODEV;
1715 			}
1716 			time++;
1717 			msleep(50);
1718 		}
1719 	}
1720 
1721 	/*
1722 	 * Initializing proper values as Pause threshold into all
1723 	 * the 8 Queues on Rx side.
1724 	 */
1725 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1726 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1727 
1728 	/* Disable RMAC PAD STRIPPING */
1729 	add = &bar0->mac_cfg;
1730 	val64 = readq(&bar0->mac_cfg);
1731 	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1732 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1733 	writel((u32) (val64), add);
1734 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1735 	writel((u32) (val64 >> 32), (add + 4));
1736 	val64 = readq(&bar0->mac_cfg);
1737 
1738 	/* Enable FCS stripping by adapter */
1739 	add = &bar0->mac_cfg;
1740 	val64 = readq(&bar0->mac_cfg);
1741 	val64 |= MAC_CFG_RMAC_STRIP_FCS;
1742 	if (nic->device_type == XFRAME_II_DEVICE)
1743 		writeq(val64, &bar0->mac_cfg);
1744 	else {
1745 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1746 		writel((u32) (val64), add);
1747 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1748 		writel((u32) (val64 >> 32), (add + 4));
1749 	}
1750 
1751 	/*
1752 	 * Set the time value to be inserted in the pause frame
1753 	 * generated by xena.
1754 	 */
1755 	val64 = readq(&bar0->rmac_pause_cfg);
1756 	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1757 	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1758 	writeq(val64, &bar0->rmac_pause_cfg);
1759 
1760 	/*
1761 	 * Set the Threshold Limit for Generating the pause frame
1762 	 * If the amount of data in any Queue exceeds ratio of
1763 	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1764 	 * pause frame is generated
1765 	 */
1766 	val64 = 0;
1767 	for (i = 0; i < 4; i++) {
1768 		val64 |= (((u64)0xFF00 |
1769 			   nic->mac_control.mc_pause_threshold_q0q3)
1770 			  << (i * 2 * 8));
1771 	}
1772 	writeq(val64, &bar0->mc_pause_thresh_q0q3);
1773 
1774 	val64 = 0;
1775 	for (i = 0; i < 4; i++) {
1776 		val64 |= (((u64)0xFF00 |
1777 			   nic->mac_control.mc_pause_threshold_q4q7)
1778 			  << (i * 2 * 8));
1779 	}
1780 	writeq(val64, &bar0->mc_pause_thresh_q4q7);
1781 
1782 	/*
1783 	 * TxDMA will stop Read request if the number of read split has
1784 	 * exceeded the limit pointed by shared_splits
1785 	 */
1786 	val64 = readq(&bar0->pic_control);
1787 	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1788 	writeq(val64, &bar0->pic_control);
1789 
1790 	if (nic->config.bus_speed == 266) {
1791 		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1792 		writeq(0x0, &bar0->read_retry_delay);
1793 		writeq(0x0, &bar0->write_retry_delay);
1794 	}
1795 
1796 	/*
1797 	 * Programming the Herc to split every write transaction
1798 	 * that does not start on an ADB to reduce disconnects.
1799 	 */
1800 	if (nic->device_type == XFRAME_II_DEVICE) {
1801 		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1802 			MISC_LINK_STABILITY_PRD(3);
1803 		writeq(val64, &bar0->misc_control);
1804 		val64 = readq(&bar0->pic_control2);
1805 		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1806 		writeq(val64, &bar0->pic_control2);
1807 	}
1808 	if (strstr(nic->product_name, "CX4")) {
1809 		val64 = TMAC_AVG_IPG(0x17);
1810 		writeq(val64, &bar0->tmac_avg_ipg);
1811 	}
1812 
1813 	return SUCCESS;
1814 }
1815 #define LINK_UP_DOWN_INTERRUPT		1
1816 #define MAC_RMAC_ERR_TIMER		2
1817 
1818 static int s2io_link_fault_indication(struct s2io_nic *nic)
1819 {
1820 	if (nic->device_type == XFRAME_II_DEVICE)
1821 		return LINK_UP_DOWN_INTERRUPT;
1822 	else
1823 		return MAC_RMAC_ERR_TIMER;
1824 }
1825 
1826 /**
1827  *  do_s2io_write_bits -  update alarm bits in alarm register
1828  *  @value: alarm bits
1829  *  @flag: interrupt status
1830  *  @addr: address value
1831  *  Description: update alarm bits in alarm register
1832  *  Return Value:
1833  *  NONE.
1834  */
1835 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1836 {
1837 	u64 temp64;
1838 
1839 	temp64 = readq(addr);
1840 
1841 	if (flag == ENABLE_INTRS)
1842 		temp64 &= ~((u64)value);
1843 	else
1844 		temp64 |= ((u64)value);
1845 	writeq(temp64, addr);
1846 }
1847 
1848 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1849 {
1850 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1851 	register u64 gen_int_mask = 0;
1852 	u64 interruptible;
1853 
1854 	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1855 	if (mask & TX_DMA_INTR) {
1856 		gen_int_mask |= TXDMA_INT_M;
1857 
1858 		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1859 				   TXDMA_PCC_INT | TXDMA_TTI_INT |
1860 				   TXDMA_LSO_INT | TXDMA_TPA_INT |
1861 				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1862 
1863 		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1864 				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1865 				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1866 				   &bar0->pfc_err_mask);
1867 
1868 		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1869 				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1870 				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1871 
1872 		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1873 				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1874 				   PCC_N_SERR | PCC_6_COF_OV_ERR |
1875 				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1876 				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1877 				   PCC_TXB_ECC_SG_ERR,
1878 				   flag, &bar0->pcc_err_mask);
1879 
1880 		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1881 				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1882 
1883 		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1884 				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1885 				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1886 				   flag, &bar0->lso_err_mask);
1887 
1888 		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1889 				   flag, &bar0->tpa_err_mask);
1890 
1891 		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1892 	}
1893 
1894 	if (mask & TX_MAC_INTR) {
1895 		gen_int_mask |= TXMAC_INT_M;
1896 		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1897 				   &bar0->mac_int_mask);
1898 		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1899 				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1900 				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1901 				   flag, &bar0->mac_tmac_err_mask);
1902 	}
1903 
1904 	if (mask & TX_XGXS_INTR) {
1905 		gen_int_mask |= TXXGXS_INT_M;
1906 		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1907 				   &bar0->xgxs_int_mask);
1908 		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1909 				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1910 				   flag, &bar0->xgxs_txgxs_err_mask);
1911 	}
1912 
1913 	if (mask & RX_DMA_INTR) {
1914 		gen_int_mask |= RXDMA_INT_M;
1915 		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1916 				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1917 				   flag, &bar0->rxdma_int_mask);
1918 		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1919 				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1920 				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1921 				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1922 		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1923 				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1924 				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1925 				   &bar0->prc_pcix_err_mask);
1926 		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1927 				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1928 				   &bar0->rpa_err_mask);
1929 		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1930 				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1931 				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1932 				   RDA_FRM_ECC_SG_ERR |
1933 				   RDA_MISC_ERR|RDA_PCIX_ERR,
1934 				   flag, &bar0->rda_err_mask);
1935 		do_s2io_write_bits(RTI_SM_ERR_ALARM |
1936 				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1937 				   flag, &bar0->rti_err_mask);
1938 	}
1939 
1940 	if (mask & RX_MAC_INTR) {
1941 		gen_int_mask |= RXMAC_INT_M;
1942 		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1943 				   &bar0->mac_int_mask);
1944 		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1945 				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1946 				 RMAC_DOUBLE_ECC_ERR);
1947 		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
1948 			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
1949 		do_s2io_write_bits(interruptible,
1950 				   flag, &bar0->mac_rmac_err_mask);
1951 	}
1952 
1953 	if (mask & RX_XGXS_INTR) {
1954 		gen_int_mask |= RXXGXS_INT_M;
1955 		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1956 				   &bar0->xgxs_int_mask);
1957 		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1958 				   &bar0->xgxs_rxgxs_err_mask);
1959 	}
1960 
1961 	if (mask & MC_INTR) {
1962 		gen_int_mask |= MC_INT_M;
1963 		do_s2io_write_bits(MC_INT_MASK_MC_INT,
1964 				   flag, &bar0->mc_int_mask);
1965 		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1966 				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1967 				   &bar0->mc_err_mask);
1968 	}
1969 	nic->general_int_mask = gen_int_mask;
1970 
1971 	/* Remove this line when alarm interrupts are enabled */
1972 	nic->general_int_mask = 0;
1973 }
1974 
1975 /**
1976  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1977  *  @nic: device private variable,
1978  *  @mask: A mask indicating which Intr block must be modified and,
1979  *  @flag: A flag indicating whether to enable or disable the Intrs.
1980  *  Description: This function will either disable or enable the interrupts
1981  *  depending on the flag argument. The mask argument can be used to
1982  *  enable/disable any Intr block.
1983  *  Return Value: NONE.
1984  */
1985 
1986 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1987 {
1988 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1989 	register u64 temp64 = 0, intr_mask = 0;
1990 
1991 	intr_mask = nic->general_int_mask;
1992 
1993 	/*  Top level interrupt classification */
1994 	/*  PIC Interrupts */
1995 	if (mask & TX_PIC_INTR) {
1996 		/*  Enable PIC Intrs in the general intr mask register */
1997 		intr_mask |= TXPIC_INT_M;
1998 		if (flag == ENABLE_INTRS) {
1999 			/*
2000 			 * If Hercules adapter enable GPIO otherwise
2001 			 * disable all PCIX, Flash, MDIO, IIC and GPIO
2002 			 * interrupts for now.
2003 			 * TODO
2004 			 */
2005 			if (s2io_link_fault_indication(nic) ==
2006 			    LINK_UP_DOWN_INTERRUPT) {
2007 				do_s2io_write_bits(PIC_INT_GPIO, flag,
2008 						   &bar0->pic_int_mask);
2009 				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2010 						   &bar0->gpio_int_mask);
2011 			} else
2012 				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2013 		} else if (flag == DISABLE_INTRS) {
2014 			/*
2015 			 * Disable PIC Intrs in the general
2016 			 * intr mask register
2017 			 */
2018 			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2019 		}
2020 	}
2021 
2022 	/*  Tx traffic interrupts */
2023 	if (mask & TX_TRAFFIC_INTR) {
2024 		intr_mask |= TXTRAFFIC_INT_M;
2025 		if (flag == ENABLE_INTRS) {
2026 			/*
2027 			 * Enable all the Tx side interrupts
2028 			 * writing 0 Enables all 64 TX interrupt levels
2029 			 */
2030 			writeq(0x0, &bar0->tx_traffic_mask);
2031 		} else if (flag == DISABLE_INTRS) {
2032 			/*
2033 			 * Disable Tx Traffic Intrs in the general intr mask
2034 			 * register.
2035 			 */
2036 			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2037 		}
2038 	}
2039 
2040 	/*  Rx traffic interrupts */
2041 	if (mask & RX_TRAFFIC_INTR) {
2042 		intr_mask |= RXTRAFFIC_INT_M;
2043 		if (flag == ENABLE_INTRS) {
2044 			/* writing 0 Enables all 8 RX interrupt levels */
2045 			writeq(0x0, &bar0->rx_traffic_mask);
2046 		} else if (flag == DISABLE_INTRS) {
2047 			/*
2048 			 * Disable Rx Traffic Intrs in the general intr mask
2049 			 * register.
2050 			 */
2051 			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2052 		}
2053 	}
2054 
2055 	temp64 = readq(&bar0->general_int_mask);
2056 	if (flag == ENABLE_INTRS)
2057 		temp64 &= ~((u64)intr_mask);
2058 	else
2059 		temp64 = DISABLE_ALL_INTRS;
2060 	writeq(temp64, &bar0->general_int_mask);
2061 
2062 	nic->general_int_mask = readq(&bar0->general_int_mask);
2063 }
2064 
2065 /**
2066  *  verify_pcc_quiescent- Checks for PCC quiescent state
2067  *  Return: 1 If PCC is quiescence
2068  *          0 If PCC is not quiescence
2069  */
2070 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2071 {
2072 	int ret = 0, herc;
2073 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2074 	u64 val64 = readq(&bar0->adapter_status);
2075 
2076 	herc = (sp->device_type == XFRAME_II_DEVICE);
2077 
2078 	if (flag == false) {
2079 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2080 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2081 				ret = 1;
2082 		} else {
2083 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2084 				ret = 1;
2085 		}
2086 	} else {
2087 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2088 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2089 			     ADAPTER_STATUS_RMAC_PCC_IDLE))
2090 				ret = 1;
2091 		} else {
2092 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2093 			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2094 				ret = 1;
2095 		}
2096 	}
2097 
2098 	return ret;
2099 }
2100 /**
2101  *  verify_xena_quiescence - Checks whether the H/W is ready
2102  *  Description: Returns whether the H/W is ready to go or not. Depending
2103  *  on whether adapter enable bit was written or not the comparison
2104  *  differs and the calling function passes the input argument flag to
2105  *  indicate this.
2106  *  Return: 1 If xena is quiescence
2107  *          0 If Xena is not quiescence
2108  */
2109 
2110 static int verify_xena_quiescence(struct s2io_nic *sp)
2111 {
2112 	int  mode;
2113 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2114 	u64 val64 = readq(&bar0->adapter_status);
2115 	mode = s2io_verify_pci_mode(sp);
2116 
2117 	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2118 		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2119 		return 0;
2120 	}
2121 	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2122 		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2123 		return 0;
2124 	}
2125 	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2126 		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2127 		return 0;
2128 	}
2129 	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2130 		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2131 		return 0;
2132 	}
2133 	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2134 		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2135 		return 0;
2136 	}
2137 	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2138 		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2139 		return 0;
2140 	}
2141 	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2142 		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2143 		return 0;
2144 	}
2145 	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2146 		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2147 		return 0;
2148 	}
2149 
2150 	/*
2151 	 * In PCI 33 mode, the P_PLL is not used, and therefore,
2152 	 * the the P_PLL_LOCK bit in the adapter_status register will
2153 	 * not be asserted.
2154 	 */
2155 	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2156 	    sp->device_type == XFRAME_II_DEVICE &&
2157 	    mode != PCI_MODE_PCI_33) {
2158 		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2159 		return 0;
2160 	}
2161 	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2162 	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2163 		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2164 		return 0;
2165 	}
2166 	return 1;
2167 }
2168 
2169 /**
2170  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2171  * @sp: Pointer to device specifc structure
2172  * Description :
2173  * New procedure to clear mac address reading  problems on Alpha platforms
2174  *
2175  */
2176 
2177 static void fix_mac_address(struct s2io_nic *sp)
2178 {
2179 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2180 	int i = 0;
2181 
2182 	while (fix_mac[i] != END_SIGN) {
2183 		writeq(fix_mac[i++], &bar0->gpio_control);
2184 		udelay(10);
2185 		(void) readq(&bar0->gpio_control);
2186 	}
2187 }
2188 
2189 /**
2190  *  start_nic - Turns the device on
2191  *  @nic : device private variable.
2192  *  Description:
2193  *  This function actually turns the device on. Before this  function is
2194  *  called,all Registers are configured from their reset states
2195  *  and shared memory is allocated but the NIC is still quiescent. On
2196  *  calling this function, the device interrupts are cleared and the NIC is
2197  *  literally switched on by writing into the adapter control register.
2198  *  Return Value:
2199  *  SUCCESS on success and -1 on failure.
2200  */
2201 
2202 static int start_nic(struct s2io_nic *nic)
2203 {
2204 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2205 	struct net_device *dev = nic->dev;
2206 	register u64 val64 = 0;
2207 	u16 subid, i;
2208 	struct config_param *config = &nic->config;
2209 	struct mac_info *mac_control = &nic->mac_control;
2210 
2211 	/*  PRC Initialization and configuration */
2212 	for (i = 0; i < config->rx_ring_num; i++) {
2213 		struct ring_info *ring = &mac_control->rings[i];
2214 
2215 		writeq((u64)ring->rx_blocks[0].block_dma_addr,
2216 		       &bar0->prc_rxd0_n[i]);
2217 
2218 		val64 = readq(&bar0->prc_ctrl_n[i]);
2219 		if (nic->rxd_mode == RXD_MODE_1)
2220 			val64 |= PRC_CTRL_RC_ENABLED;
2221 		else
2222 			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2223 		if (nic->device_type == XFRAME_II_DEVICE)
2224 			val64 |= PRC_CTRL_GROUP_READS;
2225 		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2226 		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2227 		writeq(val64, &bar0->prc_ctrl_n[i]);
2228 	}
2229 
2230 	if (nic->rxd_mode == RXD_MODE_3B) {
2231 		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2232 		val64 = readq(&bar0->rx_pa_cfg);
2233 		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2234 		writeq(val64, &bar0->rx_pa_cfg);
2235 	}
2236 
2237 	if (vlan_tag_strip == 0) {
2238 		val64 = readq(&bar0->rx_pa_cfg);
2239 		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2240 		writeq(val64, &bar0->rx_pa_cfg);
2241 		nic->vlan_strip_flag = 0;
2242 	}
2243 
2244 	/*
2245 	 * Enabling MC-RLDRAM. After enabling the device, we timeout
2246 	 * for around 100ms, which is approximately the time required
2247 	 * for the device to be ready for operation.
2248 	 */
2249 	val64 = readq(&bar0->mc_rldram_mrs);
2250 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2251 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2252 	val64 = readq(&bar0->mc_rldram_mrs);
2253 
2254 	msleep(100);	/* Delay by around 100 ms. */
2255 
2256 	/* Enabling ECC Protection. */
2257 	val64 = readq(&bar0->adapter_control);
2258 	val64 &= ~ADAPTER_ECC_EN;
2259 	writeq(val64, &bar0->adapter_control);
2260 
2261 	/*
2262 	 * Verify if the device is ready to be enabled, if so enable
2263 	 * it.
2264 	 */
2265 	val64 = readq(&bar0->adapter_status);
2266 	if (!verify_xena_quiescence(nic)) {
2267 		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2268 			  "Adapter status reads: 0x%llx\n",
2269 			  dev->name, (unsigned long long)val64);
2270 		return FAILURE;
2271 	}
2272 
2273 	/*
2274 	 * With some switches, link might be already up at this point.
2275 	 * Because of this weird behavior, when we enable laser,
2276 	 * we may not get link. We need to handle this. We cannot
2277 	 * figure out which switch is misbehaving. So we are forced to
2278 	 * make a global change.
2279 	 */
2280 
2281 	/* Enabling Laser. */
2282 	val64 = readq(&bar0->adapter_control);
2283 	val64 |= ADAPTER_EOI_TX_ON;
2284 	writeq(val64, &bar0->adapter_control);
2285 
2286 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2287 		/*
2288 		 * Dont see link state interrupts initially on some switches,
2289 		 * so directly scheduling the link state task here.
2290 		 */
2291 		schedule_work(&nic->set_link_task);
2292 	}
2293 	/* SXE-002: Initialize link and activity LED */
2294 	subid = nic->pdev->subsystem_device;
2295 	if (((subid & 0xFF) >= 0x07) &&
2296 	    (nic->device_type == XFRAME_I_DEVICE)) {
2297 		val64 = readq(&bar0->gpio_control);
2298 		val64 |= 0x0000800000000000ULL;
2299 		writeq(val64, &bar0->gpio_control);
2300 		val64 = 0x0411040400000000ULL;
2301 		writeq(val64, (void __iomem *)bar0 + 0x2700);
2302 	}
2303 
2304 	return SUCCESS;
2305 }
2306 /**
2307  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2308  */
2309 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2310 					struct TxD *txdlp, int get_off)
2311 {
2312 	struct s2io_nic *nic = fifo_data->nic;
2313 	struct sk_buff *skb;
2314 	struct TxD *txds;
2315 	u16 j, frg_cnt;
2316 
2317 	txds = txdlp;
2318 	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2319 		dma_unmap_single(&nic->pdev->dev,
2320 				 (dma_addr_t)txds->Buffer_Pointer,
2321 				 sizeof(u64), DMA_TO_DEVICE);
2322 		txds++;
2323 	}
2324 
2325 	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2326 	if (!skb) {
2327 		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2328 		return NULL;
2329 	}
2330 	dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer,
2331 			 skb_headlen(skb), DMA_TO_DEVICE);
2332 	frg_cnt = skb_shinfo(skb)->nr_frags;
2333 	if (frg_cnt) {
2334 		txds++;
2335 		for (j = 0; j < frg_cnt; j++, txds++) {
2336 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2337 			if (!txds->Buffer_Pointer)
2338 				break;
2339 			dma_unmap_page(&nic->pdev->dev,
2340 				       (dma_addr_t)txds->Buffer_Pointer,
2341 				       skb_frag_size(frag), DMA_TO_DEVICE);
2342 		}
2343 	}
2344 	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2345 	return skb;
2346 }
2347 
2348 /**
2349  *  free_tx_buffers - Free all queued Tx buffers
2350  *  @nic : device private variable.
2351  *  Description:
2352  *  Free all queued Tx buffers.
2353  *  Return Value: void
2354  */
2355 
2356 static void free_tx_buffers(struct s2io_nic *nic)
2357 {
2358 	struct net_device *dev = nic->dev;
2359 	struct sk_buff *skb;
2360 	struct TxD *txdp;
2361 	int i, j;
2362 	int cnt = 0;
2363 	struct config_param *config = &nic->config;
2364 	struct mac_info *mac_control = &nic->mac_control;
2365 	struct stat_block *stats = mac_control->stats_info;
2366 	struct swStat *swstats = &stats->sw_stat;
2367 
2368 	for (i = 0; i < config->tx_fifo_num; i++) {
2369 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2370 		struct fifo_info *fifo = &mac_control->fifos[i];
2371 		unsigned long flags;
2372 
2373 		spin_lock_irqsave(&fifo->tx_lock, flags);
2374 		for (j = 0; j < tx_cfg->fifo_len; j++) {
2375 			txdp = fifo->list_info[j].list_virt_addr;
2376 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2377 			if (skb) {
2378 				swstats->mem_freed += skb->truesize;
2379 				dev_kfree_skb(skb);
2380 				cnt++;
2381 			}
2382 		}
2383 		DBG_PRINT(INTR_DBG,
2384 			  "%s: forcibly freeing %d skbs on FIFO%d\n",
2385 			  dev->name, cnt, i);
2386 		fifo->tx_curr_get_info.offset = 0;
2387 		fifo->tx_curr_put_info.offset = 0;
2388 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
2389 	}
2390 }
2391 
2392 /**
2393  *   stop_nic -  To stop the nic
2394  *   @nic ; device private variable.
2395  *   Description:
2396  *   This function does exactly the opposite of what the start_nic()
2397  *   function does. This function is called to stop the device.
2398  *   Return Value:
2399  *   void.
2400  */
2401 
2402 static void stop_nic(struct s2io_nic *nic)
2403 {
2404 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2405 	register u64 val64 = 0;
2406 	u16 interruptible;
2407 
2408 	/*  Disable all interrupts */
2409 	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2410 	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2411 	interruptible |= TX_PIC_INTR;
2412 	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2413 
2414 	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2415 	val64 = readq(&bar0->adapter_control);
2416 	val64 &= ~(ADAPTER_CNTL_EN);
2417 	writeq(val64, &bar0->adapter_control);
2418 }
2419 
2420 /**
2421  *  fill_rx_buffers - Allocates the Rx side skbs
2422  *  @ring_info: per ring structure
2423  *  @from_card_up: If this is true, we will map the buffer to get
2424  *     the dma address for buf0 and buf1 to give it to the card.
2425  *     Else we will sync the already mapped buffer to give it to the card.
2426  *  Description:
2427  *  The function allocates Rx side skbs and puts the physical
2428  *  address of these buffers into the RxD buffer pointers, so that the NIC
2429  *  can DMA the received frame into these locations.
2430  *  The NIC supports 3 receive modes, viz
2431  *  1. single buffer,
2432  *  2. three buffer and
2433  *  3. Five buffer modes.
2434  *  Each mode defines how many fragments the received frame will be split
2435  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2436  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2437  *  is split into 3 fragments. As of now only single buffer mode is
2438  *  supported.
2439  *   Return Value:
2440  *  SUCCESS on success or an appropriate -ve value on failure.
2441  */
2442 static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2443 			   int from_card_up)
2444 {
2445 	struct sk_buff *skb;
2446 	struct RxD_t *rxdp;
2447 	int off, size, block_no, block_no1;
2448 	u32 alloc_tab = 0;
2449 	u32 alloc_cnt;
2450 	u64 tmp;
2451 	struct buffAdd *ba;
2452 	struct RxD_t *first_rxdp = NULL;
2453 	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2454 	struct RxD1 *rxdp1;
2455 	struct RxD3 *rxdp3;
2456 	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2457 
2458 	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2459 
2460 	block_no1 = ring->rx_curr_get_info.block_index;
2461 	while (alloc_tab < alloc_cnt) {
2462 		block_no = ring->rx_curr_put_info.block_index;
2463 
2464 		off = ring->rx_curr_put_info.offset;
2465 
2466 		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2467 
2468 		if ((block_no == block_no1) &&
2469 		    (off == ring->rx_curr_get_info.offset) &&
2470 		    (rxdp->Host_Control)) {
2471 			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2472 				  ring->dev->name);
2473 			goto end;
2474 		}
2475 		if (off && (off == ring->rxd_count)) {
2476 			ring->rx_curr_put_info.block_index++;
2477 			if (ring->rx_curr_put_info.block_index ==
2478 			    ring->block_count)
2479 				ring->rx_curr_put_info.block_index = 0;
2480 			block_no = ring->rx_curr_put_info.block_index;
2481 			off = 0;
2482 			ring->rx_curr_put_info.offset = off;
2483 			rxdp = ring->rx_blocks[block_no].block_virt_addr;
2484 			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2485 				  ring->dev->name, rxdp);
2486 
2487 		}
2488 
2489 		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2490 		    ((ring->rxd_mode == RXD_MODE_3B) &&
2491 		     (rxdp->Control_2 & s2BIT(0)))) {
2492 			ring->rx_curr_put_info.offset = off;
2493 			goto end;
2494 		}
2495 		/* calculate size of skb based on ring mode */
2496 		size = ring->mtu +
2497 			HEADER_ETHERNET_II_802_3_SIZE +
2498 			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2499 		if (ring->rxd_mode == RXD_MODE_1)
2500 			size += NET_IP_ALIGN;
2501 		else
2502 			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2503 
2504 		/* allocate skb */
2505 		skb = netdev_alloc_skb(nic->dev, size);
2506 		if (!skb) {
2507 			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2508 				  ring->dev->name);
2509 			if (first_rxdp) {
2510 				dma_wmb();
2511 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2512 			}
2513 			swstats->mem_alloc_fail_cnt++;
2514 
2515 			return -ENOMEM ;
2516 		}
2517 		swstats->mem_allocated += skb->truesize;
2518 
2519 		if (ring->rxd_mode == RXD_MODE_1) {
2520 			/* 1 buffer mode - normal operation mode */
2521 			rxdp1 = (struct RxD1 *)rxdp;
2522 			memset(rxdp, 0, sizeof(struct RxD1));
2523 			skb_reserve(skb, NET_IP_ALIGN);
2524 			rxdp1->Buffer0_ptr =
2525 				dma_map_single(&ring->pdev->dev, skb->data,
2526 					       size - NET_IP_ALIGN,
2527 					       DMA_FROM_DEVICE);
2528 			if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr))
2529 				goto pci_map_failed;
2530 
2531 			rxdp->Control_2 =
2532 				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2533 			rxdp->Host_Control = (unsigned long)skb;
2534 		} else if (ring->rxd_mode == RXD_MODE_3B) {
2535 			/*
2536 			 * 2 buffer mode -
2537 			 * 2 buffer mode provides 128
2538 			 * byte aligned receive buffers.
2539 			 */
2540 
2541 			rxdp3 = (struct RxD3 *)rxdp;
2542 			/* save buffer pointers to avoid frequent dma mapping */
2543 			Buffer0_ptr = rxdp3->Buffer0_ptr;
2544 			Buffer1_ptr = rxdp3->Buffer1_ptr;
2545 			memset(rxdp, 0, sizeof(struct RxD3));
2546 			/* restore the buffer pointers for dma sync*/
2547 			rxdp3->Buffer0_ptr = Buffer0_ptr;
2548 			rxdp3->Buffer1_ptr = Buffer1_ptr;
2549 
2550 			ba = &ring->ba[block_no][off];
2551 			skb_reserve(skb, BUF0_LEN);
2552 			tmp = (u64)(unsigned long)skb->data;
2553 			tmp += ALIGN_SIZE;
2554 			tmp &= ~ALIGN_SIZE;
2555 			skb->data = (void *) (unsigned long)tmp;
2556 			skb_reset_tail_pointer(skb);
2557 
2558 			if (from_card_up) {
2559 				rxdp3->Buffer0_ptr =
2560 					dma_map_single(&ring->pdev->dev,
2561 						       ba->ba_0, BUF0_LEN,
2562 						       DMA_FROM_DEVICE);
2563 				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr))
2564 					goto pci_map_failed;
2565 			} else
2566 				dma_sync_single_for_device(&ring->pdev->dev,
2567 							   (dma_addr_t)rxdp3->Buffer0_ptr,
2568 							   BUF0_LEN,
2569 							   DMA_FROM_DEVICE);
2570 
2571 			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2572 			if (ring->rxd_mode == RXD_MODE_3B) {
2573 				/* Two buffer mode */
2574 
2575 				/*
2576 				 * Buffer2 will have L3/L4 header plus
2577 				 * L4 payload
2578 				 */
2579 				rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev,
2580 								    skb->data,
2581 								    ring->mtu + 4,
2582 								    DMA_FROM_DEVICE);
2583 
2584 				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr))
2585 					goto pci_map_failed;
2586 
2587 				if (from_card_up) {
2588 					rxdp3->Buffer1_ptr =
2589 						dma_map_single(&ring->pdev->dev,
2590 							       ba->ba_1,
2591 							       BUF1_LEN,
2592 							       DMA_FROM_DEVICE);
2593 
2594 					if (dma_mapping_error(&nic->pdev->dev,
2595 							      rxdp3->Buffer1_ptr)) {
2596 						dma_unmap_single(&ring->pdev->dev,
2597 								 (dma_addr_t)(unsigned long)
2598 								 skb->data,
2599 								 ring->mtu + 4,
2600 								 DMA_FROM_DEVICE);
2601 						goto pci_map_failed;
2602 					}
2603 				}
2604 				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2605 				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2606 					(ring->mtu + 4);
2607 			}
2608 			rxdp->Control_2 |= s2BIT(0);
2609 			rxdp->Host_Control = (unsigned long) (skb);
2610 		}
2611 		if (alloc_tab & ((1 << rxsync_frequency) - 1))
2612 			rxdp->Control_1 |= RXD_OWN_XENA;
2613 		off++;
2614 		if (off == (ring->rxd_count + 1))
2615 			off = 0;
2616 		ring->rx_curr_put_info.offset = off;
2617 
2618 		rxdp->Control_2 |= SET_RXD_MARKER;
2619 		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2620 			if (first_rxdp) {
2621 				dma_wmb();
2622 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2623 			}
2624 			first_rxdp = rxdp;
2625 		}
2626 		ring->rx_bufs_left += 1;
2627 		alloc_tab++;
2628 	}
2629 
2630 end:
2631 	/* Transfer ownership of first descriptor to adapter just before
2632 	 * exiting. Before that, use memory barrier so that ownership
2633 	 * and other fields are seen by adapter correctly.
2634 	 */
2635 	if (first_rxdp) {
2636 		dma_wmb();
2637 		first_rxdp->Control_1 |= RXD_OWN_XENA;
2638 	}
2639 
2640 	return SUCCESS;
2641 
2642 pci_map_failed:
2643 	swstats->pci_map_fail_cnt++;
2644 	swstats->mem_freed += skb->truesize;
2645 	dev_kfree_skb_irq(skb);
2646 	return -ENOMEM;
2647 }
2648 
2649 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2650 {
2651 	struct net_device *dev = sp->dev;
2652 	int j;
2653 	struct sk_buff *skb;
2654 	struct RxD_t *rxdp;
2655 	struct RxD1 *rxdp1;
2656 	struct RxD3 *rxdp3;
2657 	struct mac_info *mac_control = &sp->mac_control;
2658 	struct stat_block *stats = mac_control->stats_info;
2659 	struct swStat *swstats = &stats->sw_stat;
2660 
2661 	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2662 		rxdp = mac_control->rings[ring_no].
2663 			rx_blocks[blk].rxds[j].virt_addr;
2664 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2665 		if (!skb)
2666 			continue;
2667 		if (sp->rxd_mode == RXD_MODE_1) {
2668 			rxdp1 = (struct RxD1 *)rxdp;
2669 			dma_unmap_single(&sp->pdev->dev,
2670 					 (dma_addr_t)rxdp1->Buffer0_ptr,
2671 					 dev->mtu +
2672 					 HEADER_ETHERNET_II_802_3_SIZE +
2673 					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2674 					 DMA_FROM_DEVICE);
2675 			memset(rxdp, 0, sizeof(struct RxD1));
2676 		} else if (sp->rxd_mode == RXD_MODE_3B) {
2677 			rxdp3 = (struct RxD3 *)rxdp;
2678 			dma_unmap_single(&sp->pdev->dev,
2679 					 (dma_addr_t)rxdp3->Buffer0_ptr,
2680 					 BUF0_LEN, DMA_FROM_DEVICE);
2681 			dma_unmap_single(&sp->pdev->dev,
2682 					 (dma_addr_t)rxdp3->Buffer1_ptr,
2683 					 BUF1_LEN, DMA_FROM_DEVICE);
2684 			dma_unmap_single(&sp->pdev->dev,
2685 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2686 					 dev->mtu + 4, DMA_FROM_DEVICE);
2687 			memset(rxdp, 0, sizeof(struct RxD3));
2688 		}
2689 		swstats->mem_freed += skb->truesize;
2690 		dev_kfree_skb(skb);
2691 		mac_control->rings[ring_no].rx_bufs_left -= 1;
2692 	}
2693 }
2694 
2695 /**
2696  *  free_rx_buffers - Frees all Rx buffers
2697  *  @sp: device private variable.
2698  *  Description:
2699  *  This function will free all Rx buffers allocated by host.
2700  *  Return Value:
2701  *  NONE.
2702  */
2703 
2704 static void free_rx_buffers(struct s2io_nic *sp)
2705 {
2706 	struct net_device *dev = sp->dev;
2707 	int i, blk = 0, buf_cnt = 0;
2708 	struct config_param *config = &sp->config;
2709 	struct mac_info *mac_control = &sp->mac_control;
2710 
2711 	for (i = 0; i < config->rx_ring_num; i++) {
2712 		struct ring_info *ring = &mac_control->rings[i];
2713 
2714 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2715 			free_rxd_blk(sp, i, blk);
2716 
2717 		ring->rx_curr_put_info.block_index = 0;
2718 		ring->rx_curr_get_info.block_index = 0;
2719 		ring->rx_curr_put_info.offset = 0;
2720 		ring->rx_curr_get_info.offset = 0;
2721 		ring->rx_bufs_left = 0;
2722 		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2723 			  dev->name, buf_cnt, i);
2724 	}
2725 }
2726 
2727 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2728 {
2729 	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2730 		DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2731 			  ring->dev->name);
2732 	}
2733 	return 0;
2734 }
2735 
2736 /**
2737  * s2io_poll - Rx interrupt handler for NAPI support
2738  * @napi : pointer to the napi structure.
2739  * @budget : The number of packets that were budgeted to be processed
2740  * during  one pass through the 'Poll" function.
2741  * Description:
2742  * Comes into picture only if NAPI support has been incorporated. It does
2743  * the same thing that rx_intr_handler does, but not in a interrupt context
2744  * also It will process only a given number of packets.
2745  * Return value:
2746  * 0 on success and 1 if there are No Rx packets to be processed.
2747  */
2748 
2749 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2750 {
2751 	struct ring_info *ring = container_of(napi, struct ring_info, napi);
2752 	struct net_device *dev = ring->dev;
2753 	int pkts_processed = 0;
2754 	u8 __iomem *addr = NULL;
2755 	u8 val8 = 0;
2756 	struct s2io_nic *nic = netdev_priv(dev);
2757 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2758 	int budget_org = budget;
2759 
2760 	if (unlikely(!is_s2io_card_up(nic)))
2761 		return 0;
2762 
2763 	pkts_processed = rx_intr_handler(ring, budget);
2764 	s2io_chk_rx_buffers(nic, ring);
2765 
2766 	if (pkts_processed < budget_org) {
2767 		napi_complete_done(napi, pkts_processed);
2768 		/*Re Enable MSI-Rx Vector*/
2769 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2770 		addr += 7 - ring->ring_no;
2771 		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2772 		writeb(val8, addr);
2773 		val8 = readb(addr);
2774 	}
2775 	return pkts_processed;
2776 }
2777 
2778 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2779 {
2780 	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2781 	int pkts_processed = 0;
2782 	int ring_pkts_processed, i;
2783 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2784 	int budget_org = budget;
2785 	struct config_param *config = &nic->config;
2786 	struct mac_info *mac_control = &nic->mac_control;
2787 
2788 	if (unlikely(!is_s2io_card_up(nic)))
2789 		return 0;
2790 
2791 	for (i = 0; i < config->rx_ring_num; i++) {
2792 		struct ring_info *ring = &mac_control->rings[i];
2793 		ring_pkts_processed = rx_intr_handler(ring, budget);
2794 		s2io_chk_rx_buffers(nic, ring);
2795 		pkts_processed += ring_pkts_processed;
2796 		budget -= ring_pkts_processed;
2797 		if (budget <= 0)
2798 			break;
2799 	}
2800 	if (pkts_processed < budget_org) {
2801 		napi_complete_done(napi, pkts_processed);
2802 		/* Re enable the Rx interrupts for the ring */
2803 		writeq(0, &bar0->rx_traffic_mask);
2804 		readl(&bar0->rx_traffic_mask);
2805 	}
2806 	return pkts_processed;
2807 }
2808 
2809 #ifdef CONFIG_NET_POLL_CONTROLLER
2810 /**
2811  * s2io_netpoll - netpoll event handler entry point
2812  * @dev : pointer to the device structure.
2813  * Description:
2814  * 	This function will be called by upper layer to check for events on the
2815  * interface in situations where interrupts are disabled. It is used for
2816  * specific in-kernel networking tasks, such as remote consoles and kernel
2817  * debugging over the network (example netdump in RedHat).
2818  */
2819 static void s2io_netpoll(struct net_device *dev)
2820 {
2821 	struct s2io_nic *nic = netdev_priv(dev);
2822 	const int irq = nic->pdev->irq;
2823 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2824 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2825 	int i;
2826 	struct config_param *config = &nic->config;
2827 	struct mac_info *mac_control = &nic->mac_control;
2828 
2829 	if (pci_channel_offline(nic->pdev))
2830 		return;
2831 
2832 	disable_irq(irq);
2833 
2834 	writeq(val64, &bar0->rx_traffic_int);
2835 	writeq(val64, &bar0->tx_traffic_int);
2836 
2837 	/* we need to free up the transmitted skbufs or else netpoll will
2838 	 * run out of skbs and will fail and eventually netpoll application such
2839 	 * as netdump will fail.
2840 	 */
2841 	for (i = 0; i < config->tx_fifo_num; i++)
2842 		tx_intr_handler(&mac_control->fifos[i]);
2843 
2844 	/* check for received packet and indicate up to network */
2845 	for (i = 0; i < config->rx_ring_num; i++) {
2846 		struct ring_info *ring = &mac_control->rings[i];
2847 
2848 		rx_intr_handler(ring, 0);
2849 	}
2850 
2851 	for (i = 0; i < config->rx_ring_num; i++) {
2852 		struct ring_info *ring = &mac_control->rings[i];
2853 
2854 		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2855 			DBG_PRINT(INFO_DBG,
2856 				  "%s: Out of memory in Rx Netpoll!!\n",
2857 				  dev->name);
2858 			break;
2859 		}
2860 	}
2861 	enable_irq(irq);
2862 }
2863 #endif
2864 
2865 /**
2866  *  rx_intr_handler - Rx interrupt handler
2867  *  @ring_info: per ring structure.
2868  *  @budget: budget for napi processing.
2869  *  Description:
2870  *  If the interrupt is because of a received frame or if the
2871  *  receive ring contains fresh as yet un-processed frames,this function is
2872  *  called. It picks out the RxD at which place the last Rx processing had
2873  *  stopped and sends the skb to the OSM's Rx handler and then increments
2874  *  the offset.
2875  *  Return Value:
2876  *  No. of napi packets processed.
2877  */
2878 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2879 {
2880 	int get_block, put_block;
2881 	struct rx_curr_get_info get_info, put_info;
2882 	struct RxD_t *rxdp;
2883 	struct sk_buff *skb;
2884 	int pkt_cnt = 0, napi_pkts = 0;
2885 	int i;
2886 	struct RxD1 *rxdp1;
2887 	struct RxD3 *rxdp3;
2888 
2889 	if (budget <= 0)
2890 		return napi_pkts;
2891 
2892 	get_info = ring_data->rx_curr_get_info;
2893 	get_block = get_info.block_index;
2894 	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2895 	put_block = put_info.block_index;
2896 	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2897 
2898 	while (RXD_IS_UP2DT(rxdp)) {
2899 		/*
2900 		 * If your are next to put index then it's
2901 		 * FIFO full condition
2902 		 */
2903 		if ((get_block == put_block) &&
2904 		    (get_info.offset + 1) == put_info.offset) {
2905 			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2906 				  ring_data->dev->name);
2907 			break;
2908 		}
2909 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2910 		if (skb == NULL) {
2911 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2912 				  ring_data->dev->name);
2913 			return 0;
2914 		}
2915 		if (ring_data->rxd_mode == RXD_MODE_1) {
2916 			rxdp1 = (struct RxD1 *)rxdp;
2917 			dma_unmap_single(&ring_data->pdev->dev,
2918 					 (dma_addr_t)rxdp1->Buffer0_ptr,
2919 					 ring_data->mtu +
2920 					 HEADER_ETHERNET_II_802_3_SIZE +
2921 					 HEADER_802_2_SIZE +
2922 					 HEADER_SNAP_SIZE,
2923 					 DMA_FROM_DEVICE);
2924 		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
2925 			rxdp3 = (struct RxD3 *)rxdp;
2926 			dma_sync_single_for_cpu(&ring_data->pdev->dev,
2927 						(dma_addr_t)rxdp3->Buffer0_ptr,
2928 						BUF0_LEN, DMA_FROM_DEVICE);
2929 			dma_unmap_single(&ring_data->pdev->dev,
2930 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2931 					 ring_data->mtu + 4, DMA_FROM_DEVICE);
2932 		}
2933 		prefetch(skb->data);
2934 		rx_osm_handler(ring_data, rxdp);
2935 		get_info.offset++;
2936 		ring_data->rx_curr_get_info.offset = get_info.offset;
2937 		rxdp = ring_data->rx_blocks[get_block].
2938 			rxds[get_info.offset].virt_addr;
2939 		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2940 			get_info.offset = 0;
2941 			ring_data->rx_curr_get_info.offset = get_info.offset;
2942 			get_block++;
2943 			if (get_block == ring_data->block_count)
2944 				get_block = 0;
2945 			ring_data->rx_curr_get_info.block_index = get_block;
2946 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2947 		}
2948 
2949 		if (ring_data->nic->config.napi) {
2950 			budget--;
2951 			napi_pkts++;
2952 			if (!budget)
2953 				break;
2954 		}
2955 		pkt_cnt++;
2956 		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2957 			break;
2958 	}
2959 	if (ring_data->lro) {
2960 		/* Clear all LRO sessions before exiting */
2961 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2962 			struct lro *lro = &ring_data->lro0_n[i];
2963 			if (lro->in_use) {
2964 				update_L3L4_header(ring_data->nic, lro);
2965 				queue_rx_frame(lro->parent, lro->vlan_tag);
2966 				clear_lro_session(lro);
2967 			}
2968 		}
2969 	}
2970 	return napi_pkts;
2971 }
2972 
2973 /**
2974  *  tx_intr_handler - Transmit interrupt handler
2975  *  @nic : device private variable
2976  *  Description:
2977  *  If an interrupt was raised to indicate DMA complete of the
2978  *  Tx packet, this function is called. It identifies the last TxD
2979  *  whose buffer was freed and frees all skbs whose data have already
2980  *  DMA'ed into the NICs internal memory.
2981  *  Return Value:
2982  *  NONE
2983  */
2984 
2985 static void tx_intr_handler(struct fifo_info *fifo_data)
2986 {
2987 	struct s2io_nic *nic = fifo_data->nic;
2988 	struct tx_curr_get_info get_info, put_info;
2989 	struct sk_buff *skb = NULL;
2990 	struct TxD *txdlp;
2991 	int pkt_cnt = 0;
2992 	unsigned long flags = 0;
2993 	u8 err_mask;
2994 	struct stat_block *stats = nic->mac_control.stats_info;
2995 	struct swStat *swstats = &stats->sw_stat;
2996 
2997 	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
2998 		return;
2999 
3000 	get_info = fifo_data->tx_curr_get_info;
3001 	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3002 	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3003 	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3004 	       (get_info.offset != put_info.offset) &&
3005 	       (txdlp->Host_Control)) {
3006 		/* Check for TxD errors */
3007 		if (txdlp->Control_1 & TXD_T_CODE) {
3008 			unsigned long long err;
3009 			err = txdlp->Control_1 & TXD_T_CODE;
3010 			if (err & 0x1) {
3011 				swstats->parity_err_cnt++;
3012 			}
3013 
3014 			/* update t_code statistics */
3015 			err_mask = err >> 48;
3016 			switch (err_mask) {
3017 			case 2:
3018 				swstats->tx_buf_abort_cnt++;
3019 				break;
3020 
3021 			case 3:
3022 				swstats->tx_desc_abort_cnt++;
3023 				break;
3024 
3025 			case 7:
3026 				swstats->tx_parity_err_cnt++;
3027 				break;
3028 
3029 			case 10:
3030 				swstats->tx_link_loss_cnt++;
3031 				break;
3032 
3033 			case 15:
3034 				swstats->tx_list_proc_err_cnt++;
3035 				break;
3036 			}
3037 		}
3038 
3039 		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3040 		if (skb == NULL) {
3041 			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3042 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3043 				  __func__);
3044 			return;
3045 		}
3046 		pkt_cnt++;
3047 
3048 		/* Updating the statistics block */
3049 		swstats->mem_freed += skb->truesize;
3050 		dev_consume_skb_irq(skb);
3051 
3052 		get_info.offset++;
3053 		if (get_info.offset == get_info.fifo_len + 1)
3054 			get_info.offset = 0;
3055 		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3056 		fifo_data->tx_curr_get_info.offset = get_info.offset;
3057 	}
3058 
3059 	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3060 
3061 	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3062 }
3063 
3064 /**
3065  *  s2io_mdio_write - Function to write in to MDIO registers
3066  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3067  *  @addr     : address value
3068  *  @value    : data value
3069  *  @dev      : pointer to net_device structure
3070  *  Description:
3071  *  This function is used to write values to the MDIO registers
3072  *  NONE
3073  */
3074 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3075 			    struct net_device *dev)
3076 {
3077 	u64 val64;
3078 	struct s2io_nic *sp = netdev_priv(dev);
3079 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3080 
3081 	/* address transaction */
3082 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3083 		MDIO_MMD_DEV_ADDR(mmd_type) |
3084 		MDIO_MMS_PRT_ADDR(0x0);
3085 	writeq(val64, &bar0->mdio_control);
3086 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3087 	writeq(val64, &bar0->mdio_control);
3088 	udelay(100);
3089 
3090 	/* Data transaction */
3091 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3092 		MDIO_MMD_DEV_ADDR(mmd_type) |
3093 		MDIO_MMS_PRT_ADDR(0x0) |
3094 		MDIO_MDIO_DATA(value) |
3095 		MDIO_OP(MDIO_OP_WRITE_TRANS);
3096 	writeq(val64, &bar0->mdio_control);
3097 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3098 	writeq(val64, &bar0->mdio_control);
3099 	udelay(100);
3100 
3101 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3102 		MDIO_MMD_DEV_ADDR(mmd_type) |
3103 		MDIO_MMS_PRT_ADDR(0x0) |
3104 		MDIO_OP(MDIO_OP_READ_TRANS);
3105 	writeq(val64, &bar0->mdio_control);
3106 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3107 	writeq(val64, &bar0->mdio_control);
3108 	udelay(100);
3109 }
3110 
3111 /**
3112  *  s2io_mdio_read - Function to write in to MDIO registers
3113  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3114  *  @addr     : address value
3115  *  @dev      : pointer to net_device structure
3116  *  Description:
3117  *  This function is used to read values to the MDIO registers
3118  *  NONE
3119  */
3120 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3121 {
3122 	u64 val64 = 0x0;
3123 	u64 rval64 = 0x0;
3124 	struct s2io_nic *sp = netdev_priv(dev);
3125 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3126 
3127 	/* address transaction */
3128 	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3129 			 | MDIO_MMD_DEV_ADDR(mmd_type)
3130 			 | MDIO_MMS_PRT_ADDR(0x0));
3131 	writeq(val64, &bar0->mdio_control);
3132 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3133 	writeq(val64, &bar0->mdio_control);
3134 	udelay(100);
3135 
3136 	/* Data transaction */
3137 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3138 		MDIO_MMD_DEV_ADDR(mmd_type) |
3139 		MDIO_MMS_PRT_ADDR(0x0) |
3140 		MDIO_OP(MDIO_OP_READ_TRANS);
3141 	writeq(val64, &bar0->mdio_control);
3142 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3143 	writeq(val64, &bar0->mdio_control);
3144 	udelay(100);
3145 
3146 	/* Read the value from regs */
3147 	rval64 = readq(&bar0->mdio_control);
3148 	rval64 = rval64 & 0xFFFF0000;
3149 	rval64 = rval64 >> 16;
3150 	return rval64;
3151 }
3152 
3153 /**
3154  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3155  *  @counter      : counter value to be updated
3156  *  @flag         : flag to indicate the status
3157  *  @type         : counter type
3158  *  Description:
3159  *  This function is to check the status of the xpak counters value
3160  *  NONE
3161  */
3162 
3163 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3164 				  u16 flag, u16 type)
3165 {
3166 	u64 mask = 0x3;
3167 	u64 val64;
3168 	int i;
3169 	for (i = 0; i < index; i++)
3170 		mask = mask << 0x2;
3171 
3172 	if (flag > 0) {
3173 		*counter = *counter + 1;
3174 		val64 = *regs_stat & mask;
3175 		val64 = val64 >> (index * 0x2);
3176 		val64 = val64 + 1;
3177 		if (val64 == 3) {
3178 			switch (type) {
3179 			case 1:
3180 				DBG_PRINT(ERR_DBG,
3181 					  "Take Xframe NIC out of service.\n");
3182 				DBG_PRINT(ERR_DBG,
3183 "Excessive temperatures may result in premature transceiver failure.\n");
3184 				break;
3185 			case 2:
3186 				DBG_PRINT(ERR_DBG,
3187 					  "Take Xframe NIC out of service.\n");
3188 				DBG_PRINT(ERR_DBG,
3189 "Excessive bias currents may indicate imminent laser diode failure.\n");
3190 				break;
3191 			case 3:
3192 				DBG_PRINT(ERR_DBG,
3193 					  "Take Xframe NIC out of service.\n");
3194 				DBG_PRINT(ERR_DBG,
3195 "Excessive laser output power may saturate far-end receiver.\n");
3196 				break;
3197 			default:
3198 				DBG_PRINT(ERR_DBG,
3199 					  "Incorrect XPAK Alarm type\n");
3200 			}
3201 			val64 = 0x0;
3202 		}
3203 		val64 = val64 << (index * 0x2);
3204 		*regs_stat = (*regs_stat & (~mask)) | (val64);
3205 
3206 	} else {
3207 		*regs_stat = *regs_stat & (~mask);
3208 	}
3209 }
3210 
3211 /**
3212  *  s2io_updt_xpak_counter - Function to update the xpak counters
3213  *  @dev         : pointer to net_device struct
3214  *  Description:
3215  *  This function is to upate the status of the xpak counters value
3216  *  NONE
3217  */
3218 static void s2io_updt_xpak_counter(struct net_device *dev)
3219 {
3220 	u16 flag  = 0x0;
3221 	u16 type  = 0x0;
3222 	u16 val16 = 0x0;
3223 	u64 val64 = 0x0;
3224 	u64 addr  = 0x0;
3225 
3226 	struct s2io_nic *sp = netdev_priv(dev);
3227 	struct stat_block *stats = sp->mac_control.stats_info;
3228 	struct xpakStat *xstats = &stats->xpak_stat;
3229 
3230 	/* Check the communication with the MDIO slave */
3231 	addr = MDIO_CTRL1;
3232 	val64 = 0x0;
3233 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3234 	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3235 		DBG_PRINT(ERR_DBG,
3236 			  "ERR: MDIO slave access failed - Returned %llx\n",
3237 			  (unsigned long long)val64);
3238 		return;
3239 	}
3240 
3241 	/* Check for the expected value of control reg 1 */
3242 	if (val64 != MDIO_CTRL1_SPEED10G) {
3243 		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3244 			  "Returned: %llx- Expected: 0x%x\n",
3245 			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3246 		return;
3247 	}
3248 
3249 	/* Loading the DOM register to MDIO register */
3250 	addr = 0xA100;
3251 	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3252 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3253 
3254 	/* Reading the Alarm flags */
3255 	addr = 0xA070;
3256 	val64 = 0x0;
3257 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3258 
3259 	flag = CHECKBIT(val64, 0x7);
3260 	type = 1;
3261 	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3262 			      &xstats->xpak_regs_stat,
3263 			      0x0, flag, type);
3264 
3265 	if (CHECKBIT(val64, 0x6))
3266 		xstats->alarm_transceiver_temp_low++;
3267 
3268 	flag = CHECKBIT(val64, 0x3);
3269 	type = 2;
3270 	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3271 			      &xstats->xpak_regs_stat,
3272 			      0x2, flag, type);
3273 
3274 	if (CHECKBIT(val64, 0x2))
3275 		xstats->alarm_laser_bias_current_low++;
3276 
3277 	flag = CHECKBIT(val64, 0x1);
3278 	type = 3;
3279 	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3280 			      &xstats->xpak_regs_stat,
3281 			      0x4, flag, type);
3282 
3283 	if (CHECKBIT(val64, 0x0))
3284 		xstats->alarm_laser_output_power_low++;
3285 
3286 	/* Reading the Warning flags */
3287 	addr = 0xA074;
3288 	val64 = 0x0;
3289 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3290 
3291 	if (CHECKBIT(val64, 0x7))
3292 		xstats->warn_transceiver_temp_high++;
3293 
3294 	if (CHECKBIT(val64, 0x6))
3295 		xstats->warn_transceiver_temp_low++;
3296 
3297 	if (CHECKBIT(val64, 0x3))
3298 		xstats->warn_laser_bias_current_high++;
3299 
3300 	if (CHECKBIT(val64, 0x2))
3301 		xstats->warn_laser_bias_current_low++;
3302 
3303 	if (CHECKBIT(val64, 0x1))
3304 		xstats->warn_laser_output_power_high++;
3305 
3306 	if (CHECKBIT(val64, 0x0))
3307 		xstats->warn_laser_output_power_low++;
3308 }
3309 
3310 /**
3311  *  wait_for_cmd_complete - waits for a command to complete.
3312  *  @sp : private member of the device structure, which is a pointer to the
3313  *  s2io_nic structure.
3314  *  Description: Function that waits for a command to Write into RMAC
3315  *  ADDR DATA registers to be completed and returns either success or
3316  *  error depending on whether the command was complete or not.
3317  *  Return value:
3318  *   SUCCESS on success and FAILURE on failure.
3319  */
3320 
3321 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3322 				 int bit_state)
3323 {
3324 	int ret = FAILURE, cnt = 0, delay = 1;
3325 	u64 val64;
3326 
3327 	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3328 		return FAILURE;
3329 
3330 	do {
3331 		val64 = readq(addr);
3332 		if (bit_state == S2IO_BIT_RESET) {
3333 			if (!(val64 & busy_bit)) {
3334 				ret = SUCCESS;
3335 				break;
3336 			}
3337 		} else {
3338 			if (val64 & busy_bit) {
3339 				ret = SUCCESS;
3340 				break;
3341 			}
3342 		}
3343 
3344 		if (in_interrupt())
3345 			mdelay(delay);
3346 		else
3347 			msleep(delay);
3348 
3349 		if (++cnt >= 10)
3350 			delay = 50;
3351 	} while (cnt < 20);
3352 	return ret;
3353 }
3354 /**
3355  * check_pci_device_id - Checks if the device id is supported
3356  * @id : device id
3357  * Description: Function to check if the pci device id is supported by driver.
3358  * Return value: Actual device id if supported else PCI_ANY_ID
3359  */
3360 static u16 check_pci_device_id(u16 id)
3361 {
3362 	switch (id) {
3363 	case PCI_DEVICE_ID_HERC_WIN:
3364 	case PCI_DEVICE_ID_HERC_UNI:
3365 		return XFRAME_II_DEVICE;
3366 	case PCI_DEVICE_ID_S2IO_UNI:
3367 	case PCI_DEVICE_ID_S2IO_WIN:
3368 		return XFRAME_I_DEVICE;
3369 	default:
3370 		return PCI_ANY_ID;
3371 	}
3372 }
3373 
3374 /**
3375  *  s2io_reset - Resets the card.
3376  *  @sp : private member of the device structure.
3377  *  Description: Function to Reset the card. This function then also
3378  *  restores the previously saved PCI configuration space registers as
3379  *  the card reset also resets the configuration space.
3380  *  Return value:
3381  *  void.
3382  */
3383 
3384 static void s2io_reset(struct s2io_nic *sp)
3385 {
3386 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3387 	u64 val64;
3388 	u16 subid, pci_cmd;
3389 	int i;
3390 	u16 val16;
3391 	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3392 	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3393 	struct stat_block *stats;
3394 	struct swStat *swstats;
3395 
3396 	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3397 		  __func__, pci_name(sp->pdev));
3398 
3399 	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3400 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3401 
3402 	val64 = SW_RESET_ALL;
3403 	writeq(val64, &bar0->sw_reset);
3404 	if (strstr(sp->product_name, "CX4"))
3405 		msleep(750);
3406 	msleep(250);
3407 	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3408 
3409 		/* Restore the PCI state saved during initialization. */
3410 		pci_restore_state(sp->pdev);
3411 		pci_save_state(sp->pdev);
3412 		pci_read_config_word(sp->pdev, 0x2, &val16);
3413 		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3414 			break;
3415 		msleep(200);
3416 	}
3417 
3418 	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3419 		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3420 
3421 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3422 
3423 	s2io_init_pci(sp);
3424 
3425 	/* Set swapper to enable I/O register access */
3426 	s2io_set_swapper(sp);
3427 
3428 	/* restore mac_addr entries */
3429 	do_s2io_restore_unicast_mc(sp);
3430 
3431 	/* Restore the MSIX table entries from local variables */
3432 	restore_xmsi_data(sp);
3433 
3434 	/* Clear certain PCI/PCI-X fields after reset */
3435 	if (sp->device_type == XFRAME_II_DEVICE) {
3436 		/* Clear "detected parity error" bit */
3437 		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3438 
3439 		/* Clearing PCIX Ecc status register */
3440 		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3441 
3442 		/* Clearing PCI_STATUS error reflected here */
3443 		writeq(s2BIT(62), &bar0->txpic_int_reg);
3444 	}
3445 
3446 	/* Reset device statistics maintained by OS */
3447 	memset(&sp->stats, 0, sizeof(struct net_device_stats));
3448 
3449 	stats = sp->mac_control.stats_info;
3450 	swstats = &stats->sw_stat;
3451 
3452 	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3453 	up_cnt = swstats->link_up_cnt;
3454 	down_cnt = swstats->link_down_cnt;
3455 	up_time = swstats->link_up_time;
3456 	down_time = swstats->link_down_time;
3457 	reset_cnt = swstats->soft_reset_cnt;
3458 	mem_alloc_cnt = swstats->mem_allocated;
3459 	mem_free_cnt = swstats->mem_freed;
3460 	watchdog_cnt = swstats->watchdog_timer_cnt;
3461 
3462 	memset(stats, 0, sizeof(struct stat_block));
3463 
3464 	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3465 	swstats->link_up_cnt = up_cnt;
3466 	swstats->link_down_cnt = down_cnt;
3467 	swstats->link_up_time = up_time;
3468 	swstats->link_down_time = down_time;
3469 	swstats->soft_reset_cnt = reset_cnt;
3470 	swstats->mem_allocated = mem_alloc_cnt;
3471 	swstats->mem_freed = mem_free_cnt;
3472 	swstats->watchdog_timer_cnt = watchdog_cnt;
3473 
3474 	/* SXE-002: Configure link and activity LED to turn it off */
3475 	subid = sp->pdev->subsystem_device;
3476 	if (((subid & 0xFF) >= 0x07) &&
3477 	    (sp->device_type == XFRAME_I_DEVICE)) {
3478 		val64 = readq(&bar0->gpio_control);
3479 		val64 |= 0x0000800000000000ULL;
3480 		writeq(val64, &bar0->gpio_control);
3481 		val64 = 0x0411040400000000ULL;
3482 		writeq(val64, (void __iomem *)bar0 + 0x2700);
3483 	}
3484 
3485 	/*
3486 	 * Clear spurious ECC interrupts that would have occurred on
3487 	 * XFRAME II cards after reset.
3488 	 */
3489 	if (sp->device_type == XFRAME_II_DEVICE) {
3490 		val64 = readq(&bar0->pcc_err_reg);
3491 		writeq(val64, &bar0->pcc_err_reg);
3492 	}
3493 
3494 	sp->device_enabled_once = false;
3495 }
3496 
3497 /**
3498  *  s2io_set_swapper - to set the swapper controle on the card
3499  *  @sp : private member of the device structure,
3500  *  pointer to the s2io_nic structure.
3501  *  Description: Function to set the swapper control on the card
3502  *  correctly depending on the 'endianness' of the system.
3503  *  Return value:
3504  *  SUCCESS on success and FAILURE on failure.
3505  */
3506 
3507 static int s2io_set_swapper(struct s2io_nic *sp)
3508 {
3509 	struct net_device *dev = sp->dev;
3510 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3511 	u64 val64, valt, valr;
3512 
3513 	/*
3514 	 * Set proper endian settings and verify the same by reading
3515 	 * the PIF Feed-back register.
3516 	 */
3517 
3518 	val64 = readq(&bar0->pif_rd_swapper_fb);
3519 	if (val64 != 0x0123456789ABCDEFULL) {
3520 		int i = 0;
3521 		static const u64 value[] = {
3522 			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
3523 			0x8100008181000081ULL,	/* FE=1, SE=0 */
3524 			0x4200004242000042ULL,	/* FE=0, SE=1 */
3525 			0			/* FE=0, SE=0 */
3526 		};
3527 
3528 		while (i < 4) {
3529 			writeq(value[i], &bar0->swapper_ctrl);
3530 			val64 = readq(&bar0->pif_rd_swapper_fb);
3531 			if (val64 == 0x0123456789ABCDEFULL)
3532 				break;
3533 			i++;
3534 		}
3535 		if (i == 4) {
3536 			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3537 				  "feedback read %llx\n",
3538 				  dev->name, (unsigned long long)val64);
3539 			return FAILURE;
3540 		}
3541 		valr = value[i];
3542 	} else {
3543 		valr = readq(&bar0->swapper_ctrl);
3544 	}
3545 
3546 	valt = 0x0123456789ABCDEFULL;
3547 	writeq(valt, &bar0->xmsi_address);
3548 	val64 = readq(&bar0->xmsi_address);
3549 
3550 	if (val64 != valt) {
3551 		int i = 0;
3552 		static const u64 value[] = {
3553 			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
3554 			0x0081810000818100ULL,	/* FE=1, SE=0 */
3555 			0x0042420000424200ULL,	/* FE=0, SE=1 */
3556 			0			/* FE=0, SE=0 */
3557 		};
3558 
3559 		while (i < 4) {
3560 			writeq((value[i] | valr), &bar0->swapper_ctrl);
3561 			writeq(valt, &bar0->xmsi_address);
3562 			val64 = readq(&bar0->xmsi_address);
3563 			if (val64 == valt)
3564 				break;
3565 			i++;
3566 		}
3567 		if (i == 4) {
3568 			unsigned long long x = val64;
3569 			DBG_PRINT(ERR_DBG,
3570 				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
3571 			return FAILURE;
3572 		}
3573 	}
3574 	val64 = readq(&bar0->swapper_ctrl);
3575 	val64 &= 0xFFFF000000000000ULL;
3576 
3577 #ifdef __BIG_ENDIAN
3578 	/*
3579 	 * The device by default set to a big endian format, so a
3580 	 * big endian driver need not set anything.
3581 	 */
3582 	val64 |= (SWAPPER_CTRL_TXP_FE |
3583 		  SWAPPER_CTRL_TXP_SE |
3584 		  SWAPPER_CTRL_TXD_R_FE |
3585 		  SWAPPER_CTRL_TXD_W_FE |
3586 		  SWAPPER_CTRL_TXF_R_FE |
3587 		  SWAPPER_CTRL_RXD_R_FE |
3588 		  SWAPPER_CTRL_RXD_W_FE |
3589 		  SWAPPER_CTRL_RXF_W_FE |
3590 		  SWAPPER_CTRL_XMSI_FE |
3591 		  SWAPPER_CTRL_STATS_FE |
3592 		  SWAPPER_CTRL_STATS_SE);
3593 	if (sp->config.intr_type == INTA)
3594 		val64 |= SWAPPER_CTRL_XMSI_SE;
3595 	writeq(val64, &bar0->swapper_ctrl);
3596 #else
3597 	/*
3598 	 * Initially we enable all bits to make it accessible by the
3599 	 * driver, then we selectively enable only those bits that
3600 	 * we want to set.
3601 	 */
3602 	val64 |= (SWAPPER_CTRL_TXP_FE |
3603 		  SWAPPER_CTRL_TXP_SE |
3604 		  SWAPPER_CTRL_TXD_R_FE |
3605 		  SWAPPER_CTRL_TXD_R_SE |
3606 		  SWAPPER_CTRL_TXD_W_FE |
3607 		  SWAPPER_CTRL_TXD_W_SE |
3608 		  SWAPPER_CTRL_TXF_R_FE |
3609 		  SWAPPER_CTRL_RXD_R_FE |
3610 		  SWAPPER_CTRL_RXD_R_SE |
3611 		  SWAPPER_CTRL_RXD_W_FE |
3612 		  SWAPPER_CTRL_RXD_W_SE |
3613 		  SWAPPER_CTRL_RXF_W_FE |
3614 		  SWAPPER_CTRL_XMSI_FE |
3615 		  SWAPPER_CTRL_STATS_FE |
3616 		  SWAPPER_CTRL_STATS_SE);
3617 	if (sp->config.intr_type == INTA)
3618 		val64 |= SWAPPER_CTRL_XMSI_SE;
3619 	writeq(val64, &bar0->swapper_ctrl);
3620 #endif
3621 	val64 = readq(&bar0->swapper_ctrl);
3622 
3623 	/*
3624 	 * Verifying if endian settings are accurate by reading a
3625 	 * feedback register.
3626 	 */
3627 	val64 = readq(&bar0->pif_rd_swapper_fb);
3628 	if (val64 != 0x0123456789ABCDEFULL) {
3629 		/* Endian settings are incorrect, calls for another dekko. */
3630 		DBG_PRINT(ERR_DBG,
3631 			  "%s: Endian settings are wrong, feedback read %llx\n",
3632 			  dev->name, (unsigned long long)val64);
3633 		return FAILURE;
3634 	}
3635 
3636 	return SUCCESS;
3637 }
3638 
3639 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3640 {
3641 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3642 	u64 val64;
3643 	int ret = 0, cnt = 0;
3644 
3645 	do {
3646 		val64 = readq(&bar0->xmsi_access);
3647 		if (!(val64 & s2BIT(15)))
3648 			break;
3649 		mdelay(1);
3650 		cnt++;
3651 	} while (cnt < 5);
3652 	if (cnt == 5) {
3653 		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3654 		ret = 1;
3655 	}
3656 
3657 	return ret;
3658 }
3659 
3660 static void restore_xmsi_data(struct s2io_nic *nic)
3661 {
3662 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3663 	u64 val64;
3664 	int i, msix_index;
3665 
3666 	if (nic->device_type == XFRAME_I_DEVICE)
3667 		return;
3668 
3669 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3670 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3671 		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3672 		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3673 		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3674 		writeq(val64, &bar0->xmsi_access);
3675 		if (wait_for_msix_trans(nic, msix_index))
3676 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3677 				  __func__, msix_index);
3678 	}
3679 }
3680 
3681 static void store_xmsi_data(struct s2io_nic *nic)
3682 {
3683 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3684 	u64 val64, addr, data;
3685 	int i, msix_index;
3686 
3687 	if (nic->device_type == XFRAME_I_DEVICE)
3688 		return;
3689 
3690 	/* Store and display */
3691 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3692 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3693 		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3694 		writeq(val64, &bar0->xmsi_access);
3695 		if (wait_for_msix_trans(nic, msix_index)) {
3696 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3697 				  __func__, msix_index);
3698 			continue;
3699 		}
3700 		addr = readq(&bar0->xmsi_address);
3701 		data = readq(&bar0->xmsi_data);
3702 		if (addr && data) {
3703 			nic->msix_info[i].addr = addr;
3704 			nic->msix_info[i].data = data;
3705 		}
3706 	}
3707 }
3708 
3709 static int s2io_enable_msi_x(struct s2io_nic *nic)
3710 {
3711 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3712 	u64 rx_mat;
3713 	u16 msi_control; /* Temp variable */
3714 	int ret, i, j, msix_indx = 1;
3715 	int size;
3716 	struct stat_block *stats = nic->mac_control.stats_info;
3717 	struct swStat *swstats = &stats->sw_stat;
3718 
3719 	size = nic->num_entries * sizeof(struct msix_entry);
3720 	nic->entries = kzalloc(size, GFP_KERNEL);
3721 	if (!nic->entries) {
3722 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3723 			  __func__);
3724 		swstats->mem_alloc_fail_cnt++;
3725 		return -ENOMEM;
3726 	}
3727 	swstats->mem_allocated += size;
3728 
3729 	size = nic->num_entries * sizeof(struct s2io_msix_entry);
3730 	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3731 	if (!nic->s2io_entries) {
3732 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3733 			  __func__);
3734 		swstats->mem_alloc_fail_cnt++;
3735 		kfree(nic->entries);
3736 		swstats->mem_freed
3737 			+= (nic->num_entries * sizeof(struct msix_entry));
3738 		return -ENOMEM;
3739 	}
3740 	swstats->mem_allocated += size;
3741 
3742 	nic->entries[0].entry = 0;
3743 	nic->s2io_entries[0].entry = 0;
3744 	nic->s2io_entries[0].in_use = MSIX_FLG;
3745 	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3746 	nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3747 
3748 	for (i = 1; i < nic->num_entries; i++) {
3749 		nic->entries[i].entry = ((i - 1) * 8) + 1;
3750 		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3751 		nic->s2io_entries[i].arg = NULL;
3752 		nic->s2io_entries[i].in_use = 0;
3753 	}
3754 
3755 	rx_mat = readq(&bar0->rx_mat);
3756 	for (j = 0; j < nic->config.rx_ring_num; j++) {
3757 		rx_mat |= RX_MAT_SET(j, msix_indx);
3758 		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3759 		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3760 		nic->s2io_entries[j+1].in_use = MSIX_FLG;
3761 		msix_indx += 8;
3762 	}
3763 	writeq(rx_mat, &bar0->rx_mat);
3764 	readq(&bar0->rx_mat);
3765 
3766 	ret = pci_enable_msix_range(nic->pdev, nic->entries,
3767 				    nic->num_entries, nic->num_entries);
3768 	/* We fail init if error or we get less vectors than min required */
3769 	if (ret < 0) {
3770 		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3771 		kfree(nic->entries);
3772 		swstats->mem_freed += nic->num_entries *
3773 			sizeof(struct msix_entry);
3774 		kfree(nic->s2io_entries);
3775 		swstats->mem_freed += nic->num_entries *
3776 			sizeof(struct s2io_msix_entry);
3777 		nic->entries = NULL;
3778 		nic->s2io_entries = NULL;
3779 		return -ENOMEM;
3780 	}
3781 
3782 	/*
3783 	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3784 	 * in the herc NIC. (Temp change, needs to be removed later)
3785 	 */
3786 	pci_read_config_word(nic->pdev, 0x42, &msi_control);
3787 	msi_control |= 0x1; /* Enable MSI */
3788 	pci_write_config_word(nic->pdev, 0x42, msi_control);
3789 
3790 	return 0;
3791 }
3792 
3793 /* Handle software interrupt used during MSI(X) test */
3794 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3795 {
3796 	struct s2io_nic *sp = dev_id;
3797 
3798 	sp->msi_detected = 1;
3799 	wake_up(&sp->msi_wait);
3800 
3801 	return IRQ_HANDLED;
3802 }
3803 
3804 /* Test interrupt path by forcing a a software IRQ */
3805 static int s2io_test_msi(struct s2io_nic *sp)
3806 {
3807 	struct pci_dev *pdev = sp->pdev;
3808 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3809 	int err;
3810 	u64 val64, saved64;
3811 
3812 	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3813 			  sp->name, sp);
3814 	if (err) {
3815 		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3816 			  sp->dev->name, pci_name(pdev), pdev->irq);
3817 		return err;
3818 	}
3819 
3820 	init_waitqueue_head(&sp->msi_wait);
3821 	sp->msi_detected = 0;
3822 
3823 	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3824 	val64 |= SCHED_INT_CTRL_ONE_SHOT;
3825 	val64 |= SCHED_INT_CTRL_TIMER_EN;
3826 	val64 |= SCHED_INT_CTRL_INT2MSI(1);
3827 	writeq(val64, &bar0->scheduled_int_ctrl);
3828 
3829 	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3830 
3831 	if (!sp->msi_detected) {
3832 		/* MSI(X) test failed, go back to INTx mode */
3833 		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3834 			  "using MSI(X) during test\n",
3835 			  sp->dev->name, pci_name(pdev));
3836 
3837 		err = -EOPNOTSUPP;
3838 	}
3839 
3840 	free_irq(sp->entries[1].vector, sp);
3841 
3842 	writeq(saved64, &bar0->scheduled_int_ctrl);
3843 
3844 	return err;
3845 }
3846 
3847 static void remove_msix_isr(struct s2io_nic *sp)
3848 {
3849 	int i;
3850 	u16 msi_control;
3851 
3852 	for (i = 0; i < sp->num_entries; i++) {
3853 		if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3854 			int vector = sp->entries[i].vector;
3855 			void *arg = sp->s2io_entries[i].arg;
3856 			free_irq(vector, arg);
3857 		}
3858 	}
3859 
3860 	kfree(sp->entries);
3861 	kfree(sp->s2io_entries);
3862 	sp->entries = NULL;
3863 	sp->s2io_entries = NULL;
3864 
3865 	pci_read_config_word(sp->pdev, 0x42, &msi_control);
3866 	msi_control &= 0xFFFE; /* Disable MSI */
3867 	pci_write_config_word(sp->pdev, 0x42, msi_control);
3868 
3869 	pci_disable_msix(sp->pdev);
3870 }
3871 
3872 static void remove_inta_isr(struct s2io_nic *sp)
3873 {
3874 	free_irq(sp->pdev->irq, sp->dev);
3875 }
3876 
3877 /* ********************************************************* *
3878  * Functions defined below concern the OS part of the driver *
3879  * ********************************************************* */
3880 
3881 /**
3882  *  s2io_open - open entry point of the driver
3883  *  @dev : pointer to the device structure.
3884  *  Description:
3885  *  This function is the open entry point of the driver. It mainly calls a
3886  *  function to allocate Rx buffers and inserts them into the buffer
3887  *  descriptors and then enables the Rx part of the NIC.
3888  *  Return value:
3889  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3890  *   file on failure.
3891  */
3892 
3893 static int s2io_open(struct net_device *dev)
3894 {
3895 	struct s2io_nic *sp = netdev_priv(dev);
3896 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3897 	int err = 0;
3898 
3899 	/*
3900 	 * Make sure you have link off by default every time
3901 	 * Nic is initialized
3902 	 */
3903 	netif_carrier_off(dev);
3904 	sp->last_link_state = 0;
3905 
3906 	/* Initialize H/W and enable interrupts */
3907 	err = s2io_card_up(sp);
3908 	if (err) {
3909 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3910 			  dev->name);
3911 		goto hw_init_failed;
3912 	}
3913 
3914 	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3915 		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3916 		s2io_card_down(sp);
3917 		err = -ENODEV;
3918 		goto hw_init_failed;
3919 	}
3920 	s2io_start_all_tx_queue(sp);
3921 	return 0;
3922 
3923 hw_init_failed:
3924 	if (sp->config.intr_type == MSI_X) {
3925 		if (sp->entries) {
3926 			kfree(sp->entries);
3927 			swstats->mem_freed += sp->num_entries *
3928 				sizeof(struct msix_entry);
3929 		}
3930 		if (sp->s2io_entries) {
3931 			kfree(sp->s2io_entries);
3932 			swstats->mem_freed += sp->num_entries *
3933 				sizeof(struct s2io_msix_entry);
3934 		}
3935 	}
3936 	return err;
3937 }
3938 
3939 /**
3940  *  s2io_close -close entry point of the driver
3941  *  @dev : device pointer.
3942  *  Description:
3943  *  This is the stop entry point of the driver. It needs to undo exactly
3944  *  whatever was done by the open entry point,thus it's usually referred to
3945  *  as the close function.Among other things this function mainly stops the
3946  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3947  *  Return value:
3948  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3949  *  file on failure.
3950  */
3951 
3952 static int s2io_close(struct net_device *dev)
3953 {
3954 	struct s2io_nic *sp = netdev_priv(dev);
3955 	struct config_param *config = &sp->config;
3956 	u64 tmp64;
3957 	int offset;
3958 
3959 	/* Return if the device is already closed               *
3960 	 *  Can happen when s2io_card_up failed in change_mtu    *
3961 	 */
3962 	if (!is_s2io_card_up(sp))
3963 		return 0;
3964 
3965 	s2io_stop_all_tx_queue(sp);
3966 	/* delete all populated mac entries */
3967 	for (offset = 1; offset < config->max_mc_addr; offset++) {
3968 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
3969 		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3970 			do_s2io_delete_unicast_mc(sp, tmp64);
3971 	}
3972 
3973 	s2io_card_down(sp);
3974 
3975 	return 0;
3976 }
3977 
3978 /**
3979  *  s2io_xmit - Tx entry point of te driver
3980  *  @skb : the socket buffer containing the Tx data.
3981  *  @dev : device pointer.
3982  *  Description :
3983  *  This function is the Tx entry point of the driver. S2IO NIC supports
3984  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
3985  *  NOTE: when device can't queue the pkt,just the trans_start variable will
3986  *  not be upadted.
3987  *  Return value:
3988  *  0 on success & 1 on failure.
3989  */
3990 
3991 static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3992 {
3993 	struct s2io_nic *sp = netdev_priv(dev);
3994 	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3995 	register u64 val64;
3996 	struct TxD *txdp;
3997 	struct TxFIFO_element __iomem *tx_fifo;
3998 	unsigned long flags = 0;
3999 	u16 vlan_tag = 0;
4000 	struct fifo_info *fifo = NULL;
4001 	int offload_type;
4002 	int enable_per_list_interrupt = 0;
4003 	struct config_param *config = &sp->config;
4004 	struct mac_info *mac_control = &sp->mac_control;
4005 	struct stat_block *stats = mac_control->stats_info;
4006 	struct swStat *swstats = &stats->sw_stat;
4007 
4008 	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4009 
4010 	if (unlikely(skb->len <= 0)) {
4011 		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4012 		dev_kfree_skb_any(skb);
4013 		return NETDEV_TX_OK;
4014 	}
4015 
4016 	if (!is_s2io_card_up(sp)) {
4017 		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4018 			  dev->name);
4019 		dev_kfree_skb_any(skb);
4020 		return NETDEV_TX_OK;
4021 	}
4022 
4023 	queue = 0;
4024 	if (skb_vlan_tag_present(skb))
4025 		vlan_tag = skb_vlan_tag_get(skb);
4026 	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4027 		if (skb->protocol == htons(ETH_P_IP)) {
4028 			struct iphdr *ip;
4029 			struct tcphdr *th;
4030 			ip = ip_hdr(skb);
4031 
4032 			if (!ip_is_fragment(ip)) {
4033 				th = (struct tcphdr *)(((unsigned char *)ip) +
4034 						       ip->ihl*4);
4035 
4036 				if (ip->protocol == IPPROTO_TCP) {
4037 					queue_len = sp->total_tcp_fifos;
4038 					queue = (ntohs(th->source) +
4039 						 ntohs(th->dest)) &
4040 						sp->fifo_selector[queue_len - 1];
4041 					if (queue >= queue_len)
4042 						queue = queue_len - 1;
4043 				} else if (ip->protocol == IPPROTO_UDP) {
4044 					queue_len = sp->total_udp_fifos;
4045 					queue = (ntohs(th->source) +
4046 						 ntohs(th->dest)) &
4047 						sp->fifo_selector[queue_len - 1];
4048 					if (queue >= queue_len)
4049 						queue = queue_len - 1;
4050 					queue += sp->udp_fifo_idx;
4051 					if (skb->len > 1024)
4052 						enable_per_list_interrupt = 1;
4053 				}
4054 			}
4055 		}
4056 	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4057 		/* get fifo number based on skb->priority value */
4058 		queue = config->fifo_mapping
4059 			[skb->priority & (MAX_TX_FIFOS - 1)];
4060 	fifo = &mac_control->fifos[queue];
4061 
4062 	spin_lock_irqsave(&fifo->tx_lock, flags);
4063 
4064 	if (sp->config.multiq) {
4065 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4066 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4067 			return NETDEV_TX_BUSY;
4068 		}
4069 	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4070 		if (netif_queue_stopped(dev)) {
4071 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4072 			return NETDEV_TX_BUSY;
4073 		}
4074 	}
4075 
4076 	put_off = (u16)fifo->tx_curr_put_info.offset;
4077 	get_off = (u16)fifo->tx_curr_get_info.offset;
4078 	txdp = fifo->list_info[put_off].list_virt_addr;
4079 
4080 	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4081 	/* Avoid "put" pointer going beyond "get" pointer */
4082 	if (txdp->Host_Control ||
4083 	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4084 		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4085 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4086 		dev_kfree_skb_any(skb);
4087 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
4088 		return NETDEV_TX_OK;
4089 	}
4090 
4091 	offload_type = s2io_offload_type(skb);
4092 	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4093 		txdp->Control_1 |= TXD_TCP_LSO_EN;
4094 		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4095 	}
4096 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
4097 		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4098 				    TXD_TX_CKO_TCP_EN |
4099 				    TXD_TX_CKO_UDP_EN);
4100 	}
4101 	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4102 	txdp->Control_1 |= TXD_LIST_OWN_XENA;
4103 	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4104 	if (enable_per_list_interrupt)
4105 		if (put_off & (queue_len >> 5))
4106 			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4107 	if (vlan_tag) {
4108 		txdp->Control_2 |= TXD_VLAN_ENABLE;
4109 		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4110 	}
4111 
4112 	frg_len = skb_headlen(skb);
4113 	txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data,
4114 					      frg_len, DMA_TO_DEVICE);
4115 	if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer))
4116 		goto pci_map_failed;
4117 
4118 	txdp->Host_Control = (unsigned long)skb;
4119 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4120 
4121 	frg_cnt = skb_shinfo(skb)->nr_frags;
4122 	/* For fragmented SKB. */
4123 	for (i = 0; i < frg_cnt; i++) {
4124 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4125 		/* A '0' length fragment will be ignored */
4126 		if (!skb_frag_size(frag))
4127 			continue;
4128 		txdp++;
4129 		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4130 							     frag, 0,
4131 							     skb_frag_size(frag),
4132 							     DMA_TO_DEVICE);
4133 		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4134 	}
4135 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4136 
4137 	tx_fifo = mac_control->tx_FIFO_start[queue];
4138 	val64 = fifo->list_info[put_off].list_phy_addr;
4139 	writeq(val64, &tx_fifo->TxDL_Pointer);
4140 
4141 	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4142 		 TX_FIFO_LAST_LIST);
4143 	if (offload_type)
4144 		val64 |= TX_FIFO_SPECIAL_FUNC;
4145 
4146 	writeq(val64, &tx_fifo->List_Control);
4147 
4148 	put_off++;
4149 	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4150 		put_off = 0;
4151 	fifo->tx_curr_put_info.offset = put_off;
4152 
4153 	/* Avoid "put" pointer going beyond "get" pointer */
4154 	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4155 		swstats->fifo_full_cnt++;
4156 		DBG_PRINT(TX_DBG,
4157 			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4158 			  put_off, get_off);
4159 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4160 	}
4161 	swstats->mem_allocated += skb->truesize;
4162 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4163 
4164 	if (sp->config.intr_type == MSI_X)
4165 		tx_intr_handler(fifo);
4166 
4167 	return NETDEV_TX_OK;
4168 
4169 pci_map_failed:
4170 	swstats->pci_map_fail_cnt++;
4171 	s2io_stop_tx_queue(sp, fifo->fifo_no);
4172 	swstats->mem_freed += skb->truesize;
4173 	dev_kfree_skb_any(skb);
4174 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4175 	return NETDEV_TX_OK;
4176 }
4177 
4178 static void
4179 s2io_alarm_handle(struct timer_list *t)
4180 {
4181 	struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
4182 	struct net_device *dev = sp->dev;
4183 
4184 	s2io_handle_errors(dev);
4185 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4186 }
4187 
4188 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4189 {
4190 	struct ring_info *ring = (struct ring_info *)dev_id;
4191 	struct s2io_nic *sp = ring->nic;
4192 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4193 
4194 	if (unlikely(!is_s2io_card_up(sp)))
4195 		return IRQ_HANDLED;
4196 
4197 	if (sp->config.napi) {
4198 		u8 __iomem *addr = NULL;
4199 		u8 val8 = 0;
4200 
4201 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4202 		addr += (7 - ring->ring_no);
4203 		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4204 		writeb(val8, addr);
4205 		val8 = readb(addr);
4206 		napi_schedule(&ring->napi);
4207 	} else {
4208 		rx_intr_handler(ring, 0);
4209 		s2io_chk_rx_buffers(sp, ring);
4210 	}
4211 
4212 	return IRQ_HANDLED;
4213 }
4214 
4215 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4216 {
4217 	int i;
4218 	struct fifo_info *fifos = (struct fifo_info *)dev_id;
4219 	struct s2io_nic *sp = fifos->nic;
4220 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4221 	struct config_param *config  = &sp->config;
4222 	u64 reason;
4223 
4224 	if (unlikely(!is_s2io_card_up(sp)))
4225 		return IRQ_NONE;
4226 
4227 	reason = readq(&bar0->general_int_status);
4228 	if (unlikely(reason == S2IO_MINUS_ONE))
4229 		/* Nothing much can be done. Get out */
4230 		return IRQ_HANDLED;
4231 
4232 	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4233 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4234 
4235 		if (reason & GEN_INTR_TXPIC)
4236 			s2io_txpic_intr_handle(sp);
4237 
4238 		if (reason & GEN_INTR_TXTRAFFIC)
4239 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4240 
4241 		for (i = 0; i < config->tx_fifo_num; i++)
4242 			tx_intr_handler(&fifos[i]);
4243 
4244 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4245 		readl(&bar0->general_int_status);
4246 		return IRQ_HANDLED;
4247 	}
4248 	/* The interrupt was not raised by us */
4249 	return IRQ_NONE;
4250 }
4251 
4252 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4253 {
4254 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4255 	u64 val64;
4256 
4257 	val64 = readq(&bar0->pic_int_status);
4258 	if (val64 & PIC_INT_GPIO) {
4259 		val64 = readq(&bar0->gpio_int_reg);
4260 		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4261 		    (val64 & GPIO_INT_REG_LINK_UP)) {
4262 			/*
4263 			 * This is unstable state so clear both up/down
4264 			 * interrupt and adapter to re-evaluate the link state.
4265 			 */
4266 			val64 |= GPIO_INT_REG_LINK_DOWN;
4267 			val64 |= GPIO_INT_REG_LINK_UP;
4268 			writeq(val64, &bar0->gpio_int_reg);
4269 			val64 = readq(&bar0->gpio_int_mask);
4270 			val64 &= ~(GPIO_INT_MASK_LINK_UP |
4271 				   GPIO_INT_MASK_LINK_DOWN);
4272 			writeq(val64, &bar0->gpio_int_mask);
4273 		} else if (val64 & GPIO_INT_REG_LINK_UP) {
4274 			val64 = readq(&bar0->adapter_status);
4275 			/* Enable Adapter */
4276 			val64 = readq(&bar0->adapter_control);
4277 			val64 |= ADAPTER_CNTL_EN;
4278 			writeq(val64, &bar0->adapter_control);
4279 			val64 |= ADAPTER_LED_ON;
4280 			writeq(val64, &bar0->adapter_control);
4281 			if (!sp->device_enabled_once)
4282 				sp->device_enabled_once = 1;
4283 
4284 			s2io_link(sp, LINK_UP);
4285 			/*
4286 			 * unmask link down interrupt and mask link-up
4287 			 * intr
4288 			 */
4289 			val64 = readq(&bar0->gpio_int_mask);
4290 			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4291 			val64 |= GPIO_INT_MASK_LINK_UP;
4292 			writeq(val64, &bar0->gpio_int_mask);
4293 
4294 		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4295 			val64 = readq(&bar0->adapter_status);
4296 			s2io_link(sp, LINK_DOWN);
4297 			/* Link is down so unmaks link up interrupt */
4298 			val64 = readq(&bar0->gpio_int_mask);
4299 			val64 &= ~GPIO_INT_MASK_LINK_UP;
4300 			val64 |= GPIO_INT_MASK_LINK_DOWN;
4301 			writeq(val64, &bar0->gpio_int_mask);
4302 
4303 			/* turn off LED */
4304 			val64 = readq(&bar0->adapter_control);
4305 			val64 = val64 & (~ADAPTER_LED_ON);
4306 			writeq(val64, &bar0->adapter_control);
4307 		}
4308 	}
4309 	val64 = readq(&bar0->gpio_int_mask);
4310 }
4311 
4312 /**
4313  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4314  *  @value: alarm bits
4315  *  @addr: address value
4316  *  @cnt: counter variable
4317  *  Description: Check for alarm and increment the counter
4318  *  Return Value:
4319  *  1 - if alarm bit set
4320  *  0 - if alarm bit is not set
4321  */
4322 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4323 				 unsigned long long *cnt)
4324 {
4325 	u64 val64;
4326 	val64 = readq(addr);
4327 	if (val64 & value) {
4328 		writeq(val64, addr);
4329 		(*cnt)++;
4330 		return 1;
4331 	}
4332 	return 0;
4333 
4334 }
4335 
4336 /**
4337  *  s2io_handle_errors - Xframe error indication handler
4338  *  @nic: device private variable
4339  *  Description: Handle alarms such as loss of link, single or
4340  *  double ECC errors, critical and serious errors.
4341  *  Return Value:
4342  *  NONE
4343  */
4344 static void s2io_handle_errors(void *dev_id)
4345 {
4346 	struct net_device *dev = (struct net_device *)dev_id;
4347 	struct s2io_nic *sp = netdev_priv(dev);
4348 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4349 	u64 temp64 = 0, val64 = 0;
4350 	int i = 0;
4351 
4352 	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4353 	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4354 
4355 	if (!is_s2io_card_up(sp))
4356 		return;
4357 
4358 	if (pci_channel_offline(sp->pdev))
4359 		return;
4360 
4361 	memset(&sw_stat->ring_full_cnt, 0,
4362 	       sizeof(sw_stat->ring_full_cnt));
4363 
4364 	/* Handling the XPAK counters update */
4365 	if (stats->xpak_timer_count < 72000) {
4366 		/* waiting for an hour */
4367 		stats->xpak_timer_count++;
4368 	} else {
4369 		s2io_updt_xpak_counter(dev);
4370 		/* reset the count to zero */
4371 		stats->xpak_timer_count = 0;
4372 	}
4373 
4374 	/* Handling link status change error Intr */
4375 	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4376 		val64 = readq(&bar0->mac_rmac_err_reg);
4377 		writeq(val64, &bar0->mac_rmac_err_reg);
4378 		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4379 			schedule_work(&sp->set_link_task);
4380 	}
4381 
4382 	/* In case of a serious error, the device will be Reset. */
4383 	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4384 				  &sw_stat->serious_err_cnt))
4385 		goto reset;
4386 
4387 	/* Check for data parity error */
4388 	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4389 				  &sw_stat->parity_err_cnt))
4390 		goto reset;
4391 
4392 	/* Check for ring full counter */
4393 	if (sp->device_type == XFRAME_II_DEVICE) {
4394 		val64 = readq(&bar0->ring_bump_counter1);
4395 		for (i = 0; i < 4; i++) {
4396 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4397 			temp64 >>= 64 - ((i+1)*16);
4398 			sw_stat->ring_full_cnt[i] += temp64;
4399 		}
4400 
4401 		val64 = readq(&bar0->ring_bump_counter2);
4402 		for (i = 0; i < 4; i++) {
4403 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4404 			temp64 >>= 64 - ((i+1)*16);
4405 			sw_stat->ring_full_cnt[i+4] += temp64;
4406 		}
4407 	}
4408 
4409 	val64 = readq(&bar0->txdma_int_status);
4410 	/*check for pfc_err*/
4411 	if (val64 & TXDMA_PFC_INT) {
4412 		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4413 					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4414 					  PFC_PCIX_ERR,
4415 					  &bar0->pfc_err_reg,
4416 					  &sw_stat->pfc_err_cnt))
4417 			goto reset;
4418 		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4419 				      &bar0->pfc_err_reg,
4420 				      &sw_stat->pfc_err_cnt);
4421 	}
4422 
4423 	/*check for tda_err*/
4424 	if (val64 & TXDMA_TDA_INT) {
4425 		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4426 					  TDA_SM0_ERR_ALARM |
4427 					  TDA_SM1_ERR_ALARM,
4428 					  &bar0->tda_err_reg,
4429 					  &sw_stat->tda_err_cnt))
4430 			goto reset;
4431 		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4432 				      &bar0->tda_err_reg,
4433 				      &sw_stat->tda_err_cnt);
4434 	}
4435 	/*check for pcc_err*/
4436 	if (val64 & TXDMA_PCC_INT) {
4437 		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4438 					  PCC_N_SERR | PCC_6_COF_OV_ERR |
4439 					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4440 					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4441 					  PCC_TXB_ECC_DB_ERR,
4442 					  &bar0->pcc_err_reg,
4443 					  &sw_stat->pcc_err_cnt))
4444 			goto reset;
4445 		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4446 				      &bar0->pcc_err_reg,
4447 				      &sw_stat->pcc_err_cnt);
4448 	}
4449 
4450 	/*check for tti_err*/
4451 	if (val64 & TXDMA_TTI_INT) {
4452 		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4453 					  &bar0->tti_err_reg,
4454 					  &sw_stat->tti_err_cnt))
4455 			goto reset;
4456 		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4457 				      &bar0->tti_err_reg,
4458 				      &sw_stat->tti_err_cnt);
4459 	}
4460 
4461 	/*check for lso_err*/
4462 	if (val64 & TXDMA_LSO_INT) {
4463 		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4464 					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4465 					  &bar0->lso_err_reg,
4466 					  &sw_stat->lso_err_cnt))
4467 			goto reset;
4468 		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4469 				      &bar0->lso_err_reg,
4470 				      &sw_stat->lso_err_cnt);
4471 	}
4472 
4473 	/*check for tpa_err*/
4474 	if (val64 & TXDMA_TPA_INT) {
4475 		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4476 					  &bar0->tpa_err_reg,
4477 					  &sw_stat->tpa_err_cnt))
4478 			goto reset;
4479 		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4480 				      &bar0->tpa_err_reg,
4481 				      &sw_stat->tpa_err_cnt);
4482 	}
4483 
4484 	/*check for sm_err*/
4485 	if (val64 & TXDMA_SM_INT) {
4486 		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4487 					  &bar0->sm_err_reg,
4488 					  &sw_stat->sm_err_cnt))
4489 			goto reset;
4490 	}
4491 
4492 	val64 = readq(&bar0->mac_int_status);
4493 	if (val64 & MAC_INT_STATUS_TMAC_INT) {
4494 		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4495 					  &bar0->mac_tmac_err_reg,
4496 					  &sw_stat->mac_tmac_err_cnt))
4497 			goto reset;
4498 		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4499 				      TMAC_DESC_ECC_SG_ERR |
4500 				      TMAC_DESC_ECC_DB_ERR,
4501 				      &bar0->mac_tmac_err_reg,
4502 				      &sw_stat->mac_tmac_err_cnt);
4503 	}
4504 
4505 	val64 = readq(&bar0->xgxs_int_status);
4506 	if (val64 & XGXS_INT_STATUS_TXGXS) {
4507 		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4508 					  &bar0->xgxs_txgxs_err_reg,
4509 					  &sw_stat->xgxs_txgxs_err_cnt))
4510 			goto reset;
4511 		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4512 				      &bar0->xgxs_txgxs_err_reg,
4513 				      &sw_stat->xgxs_txgxs_err_cnt);
4514 	}
4515 
4516 	val64 = readq(&bar0->rxdma_int_status);
4517 	if (val64 & RXDMA_INT_RC_INT_M) {
4518 		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4519 					  RC_FTC_ECC_DB_ERR |
4520 					  RC_PRCn_SM_ERR_ALARM |
4521 					  RC_FTC_SM_ERR_ALARM,
4522 					  &bar0->rc_err_reg,
4523 					  &sw_stat->rc_err_cnt))
4524 			goto reset;
4525 		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4526 				      RC_FTC_ECC_SG_ERR |
4527 				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4528 				      &sw_stat->rc_err_cnt);
4529 		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4530 					  PRC_PCI_AB_WR_Rn |
4531 					  PRC_PCI_AB_F_WR_Rn,
4532 					  &bar0->prc_pcix_err_reg,
4533 					  &sw_stat->prc_pcix_err_cnt))
4534 			goto reset;
4535 		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4536 				      PRC_PCI_DP_WR_Rn |
4537 				      PRC_PCI_DP_F_WR_Rn,
4538 				      &bar0->prc_pcix_err_reg,
4539 				      &sw_stat->prc_pcix_err_cnt);
4540 	}
4541 
4542 	if (val64 & RXDMA_INT_RPA_INT_M) {
4543 		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4544 					  &bar0->rpa_err_reg,
4545 					  &sw_stat->rpa_err_cnt))
4546 			goto reset;
4547 		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4548 				      &bar0->rpa_err_reg,
4549 				      &sw_stat->rpa_err_cnt);
4550 	}
4551 
4552 	if (val64 & RXDMA_INT_RDA_INT_M) {
4553 		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4554 					  RDA_FRM_ECC_DB_N_AERR |
4555 					  RDA_SM1_ERR_ALARM |
4556 					  RDA_SM0_ERR_ALARM |
4557 					  RDA_RXD_ECC_DB_SERR,
4558 					  &bar0->rda_err_reg,
4559 					  &sw_stat->rda_err_cnt))
4560 			goto reset;
4561 		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4562 				      RDA_FRM_ECC_SG_ERR |
4563 				      RDA_MISC_ERR |
4564 				      RDA_PCIX_ERR,
4565 				      &bar0->rda_err_reg,
4566 				      &sw_stat->rda_err_cnt);
4567 	}
4568 
4569 	if (val64 & RXDMA_INT_RTI_INT_M) {
4570 		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4571 					  &bar0->rti_err_reg,
4572 					  &sw_stat->rti_err_cnt))
4573 			goto reset;
4574 		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4575 				      &bar0->rti_err_reg,
4576 				      &sw_stat->rti_err_cnt);
4577 	}
4578 
4579 	val64 = readq(&bar0->mac_int_status);
4580 	if (val64 & MAC_INT_STATUS_RMAC_INT) {
4581 		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4582 					  &bar0->mac_rmac_err_reg,
4583 					  &sw_stat->mac_rmac_err_cnt))
4584 			goto reset;
4585 		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4586 				      RMAC_SINGLE_ECC_ERR |
4587 				      RMAC_DOUBLE_ECC_ERR,
4588 				      &bar0->mac_rmac_err_reg,
4589 				      &sw_stat->mac_rmac_err_cnt);
4590 	}
4591 
4592 	val64 = readq(&bar0->xgxs_int_status);
4593 	if (val64 & XGXS_INT_STATUS_RXGXS) {
4594 		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4595 					  &bar0->xgxs_rxgxs_err_reg,
4596 					  &sw_stat->xgxs_rxgxs_err_cnt))
4597 			goto reset;
4598 	}
4599 
4600 	val64 = readq(&bar0->mc_int_status);
4601 	if (val64 & MC_INT_STATUS_MC_INT) {
4602 		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4603 					  &bar0->mc_err_reg,
4604 					  &sw_stat->mc_err_cnt))
4605 			goto reset;
4606 
4607 		/* Handling Ecc errors */
4608 		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4609 			writeq(val64, &bar0->mc_err_reg);
4610 			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4611 				sw_stat->double_ecc_errs++;
4612 				if (sp->device_type != XFRAME_II_DEVICE) {
4613 					/*
4614 					 * Reset XframeI only if critical error
4615 					 */
4616 					if (val64 &
4617 					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4618 					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4619 						goto reset;
4620 				}
4621 			} else
4622 				sw_stat->single_ecc_errs++;
4623 		}
4624 	}
4625 	return;
4626 
4627 reset:
4628 	s2io_stop_all_tx_queue(sp);
4629 	schedule_work(&sp->rst_timer_task);
4630 	sw_stat->soft_reset_cnt++;
4631 }
4632 
4633 /**
4634  *  s2io_isr - ISR handler of the device .
4635  *  @irq: the irq of the device.
4636  *  @dev_id: a void pointer to the dev structure of the NIC.
4637  *  Description:  This function is the ISR handler of the device. It
4638  *  identifies the reason for the interrupt and calls the relevant
4639  *  service routines. As a contongency measure, this ISR allocates the
4640  *  recv buffers, if their numbers are below the panic value which is
4641  *  presently set to 25% of the original number of rcv buffers allocated.
4642  *  Return value:
4643  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4644  *   IRQ_NONE: will be returned if interrupt is not from our device
4645  */
4646 static irqreturn_t s2io_isr(int irq, void *dev_id)
4647 {
4648 	struct net_device *dev = (struct net_device *)dev_id;
4649 	struct s2io_nic *sp = netdev_priv(dev);
4650 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4651 	int i;
4652 	u64 reason = 0;
4653 	struct mac_info *mac_control;
4654 	struct config_param *config;
4655 
4656 	/* Pretend we handled any irq's from a disconnected card */
4657 	if (pci_channel_offline(sp->pdev))
4658 		return IRQ_NONE;
4659 
4660 	if (!is_s2io_card_up(sp))
4661 		return IRQ_NONE;
4662 
4663 	config = &sp->config;
4664 	mac_control = &sp->mac_control;
4665 
4666 	/*
4667 	 * Identify the cause for interrupt and call the appropriate
4668 	 * interrupt handler. Causes for the interrupt could be;
4669 	 * 1. Rx of packet.
4670 	 * 2. Tx complete.
4671 	 * 3. Link down.
4672 	 */
4673 	reason = readq(&bar0->general_int_status);
4674 
4675 	if (unlikely(reason == S2IO_MINUS_ONE))
4676 		return IRQ_HANDLED;	/* Nothing much can be done. Get out */
4677 
4678 	if (reason &
4679 	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4680 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4681 
4682 		if (config->napi) {
4683 			if (reason & GEN_INTR_RXTRAFFIC) {
4684 				napi_schedule(&sp->napi);
4685 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4686 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4687 				readl(&bar0->rx_traffic_int);
4688 			}
4689 		} else {
4690 			/*
4691 			 * rx_traffic_int reg is an R1 register, writing all 1's
4692 			 * will ensure that the actual interrupt causing bit
4693 			 * get's cleared and hence a read can be avoided.
4694 			 */
4695 			if (reason & GEN_INTR_RXTRAFFIC)
4696 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4697 
4698 			for (i = 0; i < config->rx_ring_num; i++) {
4699 				struct ring_info *ring = &mac_control->rings[i];
4700 
4701 				rx_intr_handler(ring, 0);
4702 			}
4703 		}
4704 
4705 		/*
4706 		 * tx_traffic_int reg is an R1 register, writing all 1's
4707 		 * will ensure that the actual interrupt causing bit get's
4708 		 * cleared and hence a read can be avoided.
4709 		 */
4710 		if (reason & GEN_INTR_TXTRAFFIC)
4711 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4712 
4713 		for (i = 0; i < config->tx_fifo_num; i++)
4714 			tx_intr_handler(&mac_control->fifos[i]);
4715 
4716 		if (reason & GEN_INTR_TXPIC)
4717 			s2io_txpic_intr_handle(sp);
4718 
4719 		/*
4720 		 * Reallocate the buffers from the interrupt handler itself.
4721 		 */
4722 		if (!config->napi) {
4723 			for (i = 0; i < config->rx_ring_num; i++) {
4724 				struct ring_info *ring = &mac_control->rings[i];
4725 
4726 				s2io_chk_rx_buffers(sp, ring);
4727 			}
4728 		}
4729 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4730 		readl(&bar0->general_int_status);
4731 
4732 		return IRQ_HANDLED;
4733 
4734 	} else if (!reason) {
4735 		/* The interrupt was not raised by us */
4736 		return IRQ_NONE;
4737 	}
4738 
4739 	return IRQ_HANDLED;
4740 }
4741 
4742 /**
4743  * s2io_updt_stats -
4744  */
4745 static void s2io_updt_stats(struct s2io_nic *sp)
4746 {
4747 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4748 	u64 val64;
4749 	int cnt = 0;
4750 
4751 	if (is_s2io_card_up(sp)) {
4752 		/* Apprx 30us on a 133 MHz bus */
4753 		val64 = SET_UPDT_CLICKS(10) |
4754 			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4755 		writeq(val64, &bar0->stat_cfg);
4756 		do {
4757 			udelay(100);
4758 			val64 = readq(&bar0->stat_cfg);
4759 			if (!(val64 & s2BIT(0)))
4760 				break;
4761 			cnt++;
4762 			if (cnt == 5)
4763 				break; /* Updt failed */
4764 		} while (1);
4765 	}
4766 }
4767 
4768 /**
4769  *  s2io_get_stats - Updates the device statistics structure.
4770  *  @dev : pointer to the device structure.
4771  *  Description:
4772  *  This function updates the device statistics structure in the s2io_nic
4773  *  structure and returns a pointer to the same.
4774  *  Return value:
4775  *  pointer to the updated net_device_stats structure.
4776  */
4777 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4778 {
4779 	struct s2io_nic *sp = netdev_priv(dev);
4780 	struct mac_info *mac_control = &sp->mac_control;
4781 	struct stat_block *stats = mac_control->stats_info;
4782 	u64 delta;
4783 
4784 	/* Configure Stats for immediate updt */
4785 	s2io_updt_stats(sp);
4786 
4787 	/* A device reset will cause the on-adapter statistics to be zero'ed.
4788 	 * This can be done while running by changing the MTU.  To prevent the
4789 	 * system from having the stats zero'ed, the driver keeps a copy of the
4790 	 * last update to the system (which is also zero'ed on reset).  This
4791 	 * enables the driver to accurately know the delta between the last
4792 	 * update and the current update.
4793 	 */
4794 	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4795 		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4796 	sp->stats.rx_packets += delta;
4797 	dev->stats.rx_packets += delta;
4798 
4799 	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4800 		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4801 	sp->stats.tx_packets += delta;
4802 	dev->stats.tx_packets += delta;
4803 
4804 	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4805 		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4806 	sp->stats.rx_bytes += delta;
4807 	dev->stats.rx_bytes += delta;
4808 
4809 	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4810 		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4811 	sp->stats.tx_bytes += delta;
4812 	dev->stats.tx_bytes += delta;
4813 
4814 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4815 	sp->stats.rx_errors += delta;
4816 	dev->stats.rx_errors += delta;
4817 
4818 	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4819 		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4820 	sp->stats.tx_errors += delta;
4821 	dev->stats.tx_errors += delta;
4822 
4823 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4824 	sp->stats.rx_dropped += delta;
4825 	dev->stats.rx_dropped += delta;
4826 
4827 	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4828 	sp->stats.tx_dropped += delta;
4829 	dev->stats.tx_dropped += delta;
4830 
4831 	/* The adapter MAC interprets pause frames as multicast packets, but
4832 	 * does not pass them up.  This erroneously increases the multicast
4833 	 * packet count and needs to be deducted when the multicast frame count
4834 	 * is queried.
4835 	 */
4836 	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4837 		le32_to_cpu(stats->rmac_vld_mcst_frms);
4838 	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4839 	delta -= sp->stats.multicast;
4840 	sp->stats.multicast += delta;
4841 	dev->stats.multicast += delta;
4842 
4843 	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4844 		le32_to_cpu(stats->rmac_usized_frms)) +
4845 		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4846 	sp->stats.rx_length_errors += delta;
4847 	dev->stats.rx_length_errors += delta;
4848 
4849 	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4850 	sp->stats.rx_crc_errors += delta;
4851 	dev->stats.rx_crc_errors += delta;
4852 
4853 	return &dev->stats;
4854 }
4855 
4856 /**
4857  *  s2io_set_multicast - entry point for multicast address enable/disable.
4858  *  @dev : pointer to the device structure
4859  *  Description:
4860  *  This function is a driver entry point which gets called by the kernel
4861  *  whenever multicast addresses must be enabled/disabled. This also gets
4862  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4863  *  determine, if multicast address must be enabled or if promiscuous mode
4864  *  is to be disabled etc.
4865  *  Return value:
4866  *  void.
4867  */
4868 
4869 static void s2io_set_multicast(struct net_device *dev)
4870 {
4871 	int i, j, prev_cnt;
4872 	struct netdev_hw_addr *ha;
4873 	struct s2io_nic *sp = netdev_priv(dev);
4874 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4875 	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4876 		0xfeffffffffffULL;
4877 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4878 	void __iomem *add;
4879 	struct config_param *config = &sp->config;
4880 
4881 	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4882 		/*  Enable all Multicast addresses */
4883 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4884 		       &bar0->rmac_addr_data0_mem);
4885 		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4886 		       &bar0->rmac_addr_data1_mem);
4887 		val64 = RMAC_ADDR_CMD_MEM_WE |
4888 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4889 			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4890 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4891 		/* Wait till command completes */
4892 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4893 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4894 				      S2IO_BIT_RESET);
4895 
4896 		sp->m_cast_flg = 1;
4897 		sp->all_multi_pos = config->max_mc_addr - 1;
4898 	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4899 		/*  Disable all Multicast addresses */
4900 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4901 		       &bar0->rmac_addr_data0_mem);
4902 		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4903 		       &bar0->rmac_addr_data1_mem);
4904 		val64 = RMAC_ADDR_CMD_MEM_WE |
4905 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4906 			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4907 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4908 		/* Wait till command completes */
4909 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4910 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4911 				      S2IO_BIT_RESET);
4912 
4913 		sp->m_cast_flg = 0;
4914 		sp->all_multi_pos = 0;
4915 	}
4916 
4917 	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4918 		/*  Put the NIC into promiscuous mode */
4919 		add = &bar0->mac_cfg;
4920 		val64 = readq(&bar0->mac_cfg);
4921 		val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4922 
4923 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4924 		writel((u32)val64, add);
4925 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4926 		writel((u32) (val64 >> 32), (add + 4));
4927 
4928 		if (vlan_tag_strip != 1) {
4929 			val64 = readq(&bar0->rx_pa_cfg);
4930 			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4931 			writeq(val64, &bar0->rx_pa_cfg);
4932 			sp->vlan_strip_flag = 0;
4933 		}
4934 
4935 		val64 = readq(&bar0->mac_cfg);
4936 		sp->promisc_flg = 1;
4937 		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4938 			  dev->name);
4939 	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4940 		/*  Remove the NIC from promiscuous mode */
4941 		add = &bar0->mac_cfg;
4942 		val64 = readq(&bar0->mac_cfg);
4943 		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4944 
4945 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4946 		writel((u32)val64, add);
4947 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4948 		writel((u32) (val64 >> 32), (add + 4));
4949 
4950 		if (vlan_tag_strip != 0) {
4951 			val64 = readq(&bar0->rx_pa_cfg);
4952 			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4953 			writeq(val64, &bar0->rx_pa_cfg);
4954 			sp->vlan_strip_flag = 1;
4955 		}
4956 
4957 		val64 = readq(&bar0->mac_cfg);
4958 		sp->promisc_flg = 0;
4959 		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
4960 	}
4961 
4962 	/*  Update individual M_CAST address list */
4963 	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
4964 		if (netdev_mc_count(dev) >
4965 		    (config->max_mc_addr - config->max_mac_addr)) {
4966 			DBG_PRINT(ERR_DBG,
4967 				  "%s: No more Rx filters can be added - "
4968 				  "please enable ALL_MULTI instead\n",
4969 				  dev->name);
4970 			return;
4971 		}
4972 
4973 		prev_cnt = sp->mc_addr_count;
4974 		sp->mc_addr_count = netdev_mc_count(dev);
4975 
4976 		/* Clear out the previous list of Mc in the H/W. */
4977 		for (i = 0; i < prev_cnt; i++) {
4978 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4979 			       &bar0->rmac_addr_data0_mem);
4980 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4981 			       &bar0->rmac_addr_data1_mem);
4982 			val64 = RMAC_ADDR_CMD_MEM_WE |
4983 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4984 				RMAC_ADDR_CMD_MEM_OFFSET
4985 				(config->mc_start_offset + i);
4986 			writeq(val64, &bar0->rmac_addr_cmd_mem);
4987 
4988 			/* Wait for command completes */
4989 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4990 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4991 						  S2IO_BIT_RESET)) {
4992 				DBG_PRINT(ERR_DBG,
4993 					  "%s: Adding Multicasts failed\n",
4994 					  dev->name);
4995 				return;
4996 			}
4997 		}
4998 
4999 		/* Create the new Rx filter list and update the same in H/W. */
5000 		i = 0;
5001 		netdev_for_each_mc_addr(ha, dev) {
5002 			mac_addr = 0;
5003 			for (j = 0; j < ETH_ALEN; j++) {
5004 				mac_addr |= ha->addr[j];
5005 				mac_addr <<= 8;
5006 			}
5007 			mac_addr >>= 8;
5008 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5009 			       &bar0->rmac_addr_data0_mem);
5010 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5011 			       &bar0->rmac_addr_data1_mem);
5012 			val64 = RMAC_ADDR_CMD_MEM_WE |
5013 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5014 				RMAC_ADDR_CMD_MEM_OFFSET
5015 				(i + config->mc_start_offset);
5016 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5017 
5018 			/* Wait for command completes */
5019 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5020 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5021 						  S2IO_BIT_RESET)) {
5022 				DBG_PRINT(ERR_DBG,
5023 					  "%s: Adding Multicasts failed\n",
5024 					  dev->name);
5025 				return;
5026 			}
5027 			i++;
5028 		}
5029 	}
5030 }
5031 
5032 /* read from CAM unicast & multicast addresses and store it in
5033  * def_mac_addr structure
5034  */
5035 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5036 {
5037 	int offset;
5038 	u64 mac_addr = 0x0;
5039 	struct config_param *config = &sp->config;
5040 
5041 	/* store unicast & multicast mac addresses */
5042 	for (offset = 0; offset < config->max_mc_addr; offset++) {
5043 		mac_addr = do_s2io_read_unicast_mc(sp, offset);
5044 		/* if read fails disable the entry */
5045 		if (mac_addr == FAILURE)
5046 			mac_addr = S2IO_DISABLE_MAC_ENTRY;
5047 		do_s2io_copy_mac_addr(sp, offset, mac_addr);
5048 	}
5049 }
5050 
5051 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5052 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5053 {
5054 	int offset;
5055 	struct config_param *config = &sp->config;
5056 	/* restore unicast mac address */
5057 	for (offset = 0; offset < config->max_mac_addr; offset++)
5058 		do_s2io_prog_unicast(sp->dev,
5059 				     sp->def_mac_addr[offset].mac_addr);
5060 
5061 	/* restore multicast mac address */
5062 	for (offset = config->mc_start_offset;
5063 	     offset < config->max_mc_addr; offset++)
5064 		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5065 }
5066 
5067 /* add a multicast MAC address to CAM */
5068 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5069 {
5070 	int i;
5071 	u64 mac_addr = 0;
5072 	struct config_param *config = &sp->config;
5073 
5074 	for (i = 0; i < ETH_ALEN; i++) {
5075 		mac_addr <<= 8;
5076 		mac_addr |= addr[i];
5077 	}
5078 	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5079 		return SUCCESS;
5080 
5081 	/* check if the multicast mac already preset in CAM */
5082 	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5083 		u64 tmp64;
5084 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5085 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5086 			break;
5087 
5088 		if (tmp64 == mac_addr)
5089 			return SUCCESS;
5090 	}
5091 	if (i == config->max_mc_addr) {
5092 		DBG_PRINT(ERR_DBG,
5093 			  "CAM full no space left for multicast MAC\n");
5094 		return FAILURE;
5095 	}
5096 	/* Update the internal structure with this new mac address */
5097 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5098 
5099 	return do_s2io_add_mac(sp, mac_addr, i);
5100 }
5101 
5102 /* add MAC address to CAM */
5103 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5104 {
5105 	u64 val64;
5106 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5107 
5108 	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5109 	       &bar0->rmac_addr_data0_mem);
5110 
5111 	val64 =	RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5112 		RMAC_ADDR_CMD_MEM_OFFSET(off);
5113 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5114 
5115 	/* Wait till command completes */
5116 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5117 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5118 				  S2IO_BIT_RESET)) {
5119 		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5120 		return FAILURE;
5121 	}
5122 	return SUCCESS;
5123 }
5124 /* deletes a specified unicast/multicast mac entry from CAM */
5125 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5126 {
5127 	int offset;
5128 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5129 	struct config_param *config = &sp->config;
5130 
5131 	for (offset = 1;
5132 	     offset < config->max_mc_addr; offset++) {
5133 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
5134 		if (tmp64 == addr) {
5135 			/* disable the entry by writing  0xffffffffffffULL */
5136 			if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5137 				return FAILURE;
5138 			/* store the new mac list from CAM */
5139 			do_s2io_store_unicast_mc(sp);
5140 			return SUCCESS;
5141 		}
5142 	}
5143 	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5144 		  (unsigned long long)addr);
5145 	return FAILURE;
5146 }
5147 
5148 /* read mac entries from CAM */
5149 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5150 {
5151 	u64 tmp64, val64;
5152 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5153 
5154 	/* read mac addr */
5155 	val64 =	RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5156 		RMAC_ADDR_CMD_MEM_OFFSET(offset);
5157 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5158 
5159 	/* Wait till command completes */
5160 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5161 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5162 				  S2IO_BIT_RESET)) {
5163 		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5164 		return FAILURE;
5165 	}
5166 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
5167 
5168 	return tmp64 >> 16;
5169 }
5170 
5171 /**
5172  * s2io_set_mac_addr - driver entry point
5173  */
5174 
5175 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5176 {
5177 	struct sockaddr *addr = p;
5178 
5179 	if (!is_valid_ether_addr(addr->sa_data))
5180 		return -EADDRNOTAVAIL;
5181 
5182 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5183 
5184 	/* store the MAC address in CAM */
5185 	return do_s2io_prog_unicast(dev, dev->dev_addr);
5186 }
5187 /**
5188  *  do_s2io_prog_unicast - Programs the Xframe mac address
5189  *  @dev : pointer to the device structure.
5190  *  @addr: a uchar pointer to the new mac address which is to be set.
5191  *  Description : This procedure will program the Xframe to receive
5192  *  frames with new Mac Address
5193  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5194  *  as defined in errno.h file on failure.
5195  */
5196 
5197 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5198 {
5199 	struct s2io_nic *sp = netdev_priv(dev);
5200 	register u64 mac_addr = 0, perm_addr = 0;
5201 	int i;
5202 	u64 tmp64;
5203 	struct config_param *config = &sp->config;
5204 
5205 	/*
5206 	 * Set the new MAC address as the new unicast filter and reflect this
5207 	 * change on the device address registered with the OS. It will be
5208 	 * at offset 0.
5209 	 */
5210 	for (i = 0; i < ETH_ALEN; i++) {
5211 		mac_addr <<= 8;
5212 		mac_addr |= addr[i];
5213 		perm_addr <<= 8;
5214 		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5215 	}
5216 
5217 	/* check if the dev_addr is different than perm_addr */
5218 	if (mac_addr == perm_addr)
5219 		return SUCCESS;
5220 
5221 	/* check if the mac already preset in CAM */
5222 	for (i = 1; i < config->max_mac_addr; i++) {
5223 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5224 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5225 			break;
5226 
5227 		if (tmp64 == mac_addr) {
5228 			DBG_PRINT(INFO_DBG,
5229 				  "MAC addr:0x%llx already present in CAM\n",
5230 				  (unsigned long long)mac_addr);
5231 			return SUCCESS;
5232 		}
5233 	}
5234 	if (i == config->max_mac_addr) {
5235 		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5236 		return FAILURE;
5237 	}
5238 	/* Update the internal structure with this new mac address */
5239 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5240 
5241 	return do_s2io_add_mac(sp, mac_addr, i);
5242 }
5243 
5244 /**
5245  * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5246  * @sp : private member of the device structure, which is a pointer to the
5247  * s2io_nic structure.
5248  * @cmd: pointer to the structure with parameters given by ethtool to set
5249  * link information.
5250  * Description:
5251  * The function sets different link parameters provided by the user onto
5252  * the NIC.
5253  * Return value:
5254  * 0 on success.
5255  */
5256 
5257 static int
5258 s2io_ethtool_set_link_ksettings(struct net_device *dev,
5259 				const struct ethtool_link_ksettings *cmd)
5260 {
5261 	struct s2io_nic *sp = netdev_priv(dev);
5262 	if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5263 	    (cmd->base.speed != SPEED_10000) ||
5264 	    (cmd->base.duplex != DUPLEX_FULL))
5265 		return -EINVAL;
5266 	else {
5267 		s2io_close(sp->dev);
5268 		s2io_open(sp->dev);
5269 	}
5270 
5271 	return 0;
5272 }
5273 
5274 /**
5275  * s2io_ethtol_get_link_ksettings - Return link specific information.
5276  * @sp : private member of the device structure, pointer to the
5277  *      s2io_nic structure.
5278  * @cmd : pointer to the structure with parameters given by ethtool
5279  * to return link information.
5280  * Description:
5281  * Returns link specific information like speed, duplex etc.. to ethtool.
5282  * Return value :
5283  * return 0 on success.
5284  */
5285 
5286 static int
5287 s2io_ethtool_get_link_ksettings(struct net_device *dev,
5288 				struct ethtool_link_ksettings *cmd)
5289 {
5290 	struct s2io_nic *sp = netdev_priv(dev);
5291 
5292 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
5293 	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5294 	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5295 
5296 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5297 	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5298 	ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5299 
5300 	cmd->base.port = PORT_FIBRE;
5301 
5302 	if (netif_carrier_ok(sp->dev)) {
5303 		cmd->base.speed = SPEED_10000;
5304 		cmd->base.duplex = DUPLEX_FULL;
5305 	} else {
5306 		cmd->base.speed = SPEED_UNKNOWN;
5307 		cmd->base.duplex = DUPLEX_UNKNOWN;
5308 	}
5309 
5310 	cmd->base.autoneg = AUTONEG_DISABLE;
5311 	return 0;
5312 }
5313 
5314 /**
5315  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5316  * @sp : private member of the device structure, which is a pointer to the
5317  * s2io_nic structure.
5318  * @info : pointer to the structure with parameters given by ethtool to
5319  * return driver information.
5320  * Description:
5321  * Returns driver specefic information like name, version etc.. to ethtool.
5322  * Return value:
5323  *  void
5324  */
5325 
5326 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5327 				  struct ethtool_drvinfo *info)
5328 {
5329 	struct s2io_nic *sp = netdev_priv(dev);
5330 
5331 	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5332 	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
5333 	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5334 }
5335 
5336 /**
5337  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5338  *  @sp: private member of the device structure, which is a pointer to the
5339  *  s2io_nic structure.
5340  *  @regs : pointer to the structure with parameters given by ethtool for
5341  *  dumping the registers.
5342  *  @reg_space: The input argument into which all the registers are dumped.
5343  *  Description:
5344  *  Dumps the entire register space of xFrame NIC into the user given
5345  *  buffer area.
5346  * Return value :
5347  * void .
5348  */
5349 
5350 static void s2io_ethtool_gregs(struct net_device *dev,
5351 			       struct ethtool_regs *regs, void *space)
5352 {
5353 	int i;
5354 	u64 reg;
5355 	u8 *reg_space = (u8 *)space;
5356 	struct s2io_nic *sp = netdev_priv(dev);
5357 
5358 	regs->len = XENA_REG_SPACE;
5359 	regs->version = sp->pdev->subsystem_device;
5360 
5361 	for (i = 0; i < regs->len; i += 8) {
5362 		reg = readq(sp->bar0 + i);
5363 		memcpy((reg_space + i), &reg, 8);
5364 	}
5365 }
5366 
5367 /*
5368  *  s2io_set_led - control NIC led
5369  */
5370 static void s2io_set_led(struct s2io_nic *sp, bool on)
5371 {
5372 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5373 	u16 subid = sp->pdev->subsystem_device;
5374 	u64 val64;
5375 
5376 	if ((sp->device_type == XFRAME_II_DEVICE) ||
5377 	    ((subid & 0xFF) >= 0x07)) {
5378 		val64 = readq(&bar0->gpio_control);
5379 		if (on)
5380 			val64 |= GPIO_CTRL_GPIO_0;
5381 		else
5382 			val64 &= ~GPIO_CTRL_GPIO_0;
5383 
5384 		writeq(val64, &bar0->gpio_control);
5385 	} else {
5386 		val64 = readq(&bar0->adapter_control);
5387 		if (on)
5388 			val64 |= ADAPTER_LED_ON;
5389 		else
5390 			val64 &= ~ADAPTER_LED_ON;
5391 
5392 		writeq(val64, &bar0->adapter_control);
5393 	}
5394 
5395 }
5396 
5397 /**
5398  * s2io_ethtool_set_led - To physically identify the nic on the system.
5399  * @dev : network device
5400  * @state: led setting
5401  *
5402  * Description: Used to physically identify the NIC on the system.
5403  * The Link LED will blink for a time specified by the user for
5404  * identification.
5405  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5406  * identification is possible only if it's link is up.
5407  */
5408 
5409 static int s2io_ethtool_set_led(struct net_device *dev,
5410 				enum ethtool_phys_id_state state)
5411 {
5412 	struct s2io_nic *sp = netdev_priv(dev);
5413 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5414 	u16 subid = sp->pdev->subsystem_device;
5415 
5416 	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5417 		u64 val64 = readq(&bar0->adapter_control);
5418 		if (!(val64 & ADAPTER_CNTL_EN)) {
5419 			pr_err("Adapter Link down, cannot blink LED\n");
5420 			return -EAGAIN;
5421 		}
5422 	}
5423 
5424 	switch (state) {
5425 	case ETHTOOL_ID_ACTIVE:
5426 		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5427 		return 1;	/* cycle on/off once per second */
5428 
5429 	case ETHTOOL_ID_ON:
5430 		s2io_set_led(sp, true);
5431 		break;
5432 
5433 	case ETHTOOL_ID_OFF:
5434 		s2io_set_led(sp, false);
5435 		break;
5436 
5437 	case ETHTOOL_ID_INACTIVE:
5438 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5439 			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5440 	}
5441 
5442 	return 0;
5443 }
5444 
5445 static void s2io_ethtool_gringparam(struct net_device *dev,
5446 				    struct ethtool_ringparam *ering)
5447 {
5448 	struct s2io_nic *sp = netdev_priv(dev);
5449 	int i, tx_desc_count = 0, rx_desc_count = 0;
5450 
5451 	if (sp->rxd_mode == RXD_MODE_1) {
5452 		ering->rx_max_pending = MAX_RX_DESC_1;
5453 		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5454 	} else {
5455 		ering->rx_max_pending = MAX_RX_DESC_2;
5456 		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5457 	}
5458 
5459 	ering->tx_max_pending = MAX_TX_DESC;
5460 
5461 	for (i = 0; i < sp->config.rx_ring_num; i++)
5462 		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5463 	ering->rx_pending = rx_desc_count;
5464 	ering->rx_jumbo_pending = rx_desc_count;
5465 
5466 	for (i = 0; i < sp->config.tx_fifo_num; i++)
5467 		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5468 	ering->tx_pending = tx_desc_count;
5469 	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5470 }
5471 
5472 /**
5473  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5474  * @sp : private member of the device structure, which is a pointer to the
5475  *	s2io_nic structure.
5476  * @ep : pointer to the structure with pause parameters given by ethtool.
5477  * Description:
5478  * Returns the Pause frame generation and reception capability of the NIC.
5479  * Return value:
5480  *  void
5481  */
5482 static void s2io_ethtool_getpause_data(struct net_device *dev,
5483 				       struct ethtool_pauseparam *ep)
5484 {
5485 	u64 val64;
5486 	struct s2io_nic *sp = netdev_priv(dev);
5487 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5488 
5489 	val64 = readq(&bar0->rmac_pause_cfg);
5490 	if (val64 & RMAC_PAUSE_GEN_ENABLE)
5491 		ep->tx_pause = true;
5492 	if (val64 & RMAC_PAUSE_RX_ENABLE)
5493 		ep->rx_pause = true;
5494 	ep->autoneg = false;
5495 }
5496 
5497 /**
5498  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5499  * @sp : private member of the device structure, which is a pointer to the
5500  *      s2io_nic structure.
5501  * @ep : pointer to the structure with pause parameters given by ethtool.
5502  * Description:
5503  * It can be used to set or reset Pause frame generation or reception
5504  * support of the NIC.
5505  * Return value:
5506  * int, returns 0 on Success
5507  */
5508 
5509 static int s2io_ethtool_setpause_data(struct net_device *dev,
5510 				      struct ethtool_pauseparam *ep)
5511 {
5512 	u64 val64;
5513 	struct s2io_nic *sp = netdev_priv(dev);
5514 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5515 
5516 	val64 = readq(&bar0->rmac_pause_cfg);
5517 	if (ep->tx_pause)
5518 		val64 |= RMAC_PAUSE_GEN_ENABLE;
5519 	else
5520 		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5521 	if (ep->rx_pause)
5522 		val64 |= RMAC_PAUSE_RX_ENABLE;
5523 	else
5524 		val64 &= ~RMAC_PAUSE_RX_ENABLE;
5525 	writeq(val64, &bar0->rmac_pause_cfg);
5526 	return 0;
5527 }
5528 
5529 /**
5530  * read_eeprom - reads 4 bytes of data from user given offset.
5531  * @sp : private member of the device structure, which is a pointer to the
5532  *      s2io_nic structure.
5533  * @off : offset at which the data must be written
5534  * @data : Its an output parameter where the data read at the given
5535  *	offset is stored.
5536  * Description:
5537  * Will read 4 bytes of data from the user given offset and return the
5538  * read data.
5539  * NOTE: Will allow to read only part of the EEPROM visible through the
5540  *   I2C bus.
5541  * Return value:
5542  *  -1 on failure and 0 on success.
5543  */
5544 
5545 #define S2IO_DEV_ID		5
5546 static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5547 {
5548 	int ret = -1;
5549 	u32 exit_cnt = 0;
5550 	u64 val64;
5551 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5552 
5553 	if (sp->device_type == XFRAME_I_DEVICE) {
5554 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5555 			I2C_CONTROL_ADDR(off) |
5556 			I2C_CONTROL_BYTE_CNT(0x3) |
5557 			I2C_CONTROL_READ |
5558 			I2C_CONTROL_CNTL_START;
5559 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5560 
5561 		while (exit_cnt < 5) {
5562 			val64 = readq(&bar0->i2c_control);
5563 			if (I2C_CONTROL_CNTL_END(val64)) {
5564 				*data = I2C_CONTROL_GET_DATA(val64);
5565 				ret = 0;
5566 				break;
5567 			}
5568 			msleep(50);
5569 			exit_cnt++;
5570 		}
5571 	}
5572 
5573 	if (sp->device_type == XFRAME_II_DEVICE) {
5574 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5575 			SPI_CONTROL_BYTECNT(0x3) |
5576 			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5577 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5578 		val64 |= SPI_CONTROL_REQ;
5579 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5580 		while (exit_cnt < 5) {
5581 			val64 = readq(&bar0->spi_control);
5582 			if (val64 & SPI_CONTROL_NACK) {
5583 				ret = 1;
5584 				break;
5585 			} else if (val64 & SPI_CONTROL_DONE) {
5586 				*data = readq(&bar0->spi_data);
5587 				*data &= 0xffffff;
5588 				ret = 0;
5589 				break;
5590 			}
5591 			msleep(50);
5592 			exit_cnt++;
5593 		}
5594 	}
5595 	return ret;
5596 }
5597 
5598 /**
5599  *  write_eeprom - actually writes the relevant part of the data value.
5600  *  @sp : private member of the device structure, which is a pointer to the
5601  *       s2io_nic structure.
5602  *  @off : offset at which the data must be written
5603  *  @data : The data that is to be written
5604  *  @cnt : Number of bytes of the data that are actually to be written into
5605  *  the Eeprom. (max of 3)
5606  * Description:
5607  *  Actually writes the relevant part of the data value into the Eeprom
5608  *  through the I2C bus.
5609  * Return value:
5610  *  0 on success, -1 on failure.
5611  */
5612 
5613 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5614 {
5615 	int exit_cnt = 0, ret = -1;
5616 	u64 val64;
5617 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5618 
5619 	if (sp->device_type == XFRAME_I_DEVICE) {
5620 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5621 			I2C_CONTROL_ADDR(off) |
5622 			I2C_CONTROL_BYTE_CNT(cnt) |
5623 			I2C_CONTROL_SET_DATA((u32)data) |
5624 			I2C_CONTROL_CNTL_START;
5625 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5626 
5627 		while (exit_cnt < 5) {
5628 			val64 = readq(&bar0->i2c_control);
5629 			if (I2C_CONTROL_CNTL_END(val64)) {
5630 				if (!(val64 & I2C_CONTROL_NACK))
5631 					ret = 0;
5632 				break;
5633 			}
5634 			msleep(50);
5635 			exit_cnt++;
5636 		}
5637 	}
5638 
5639 	if (sp->device_type == XFRAME_II_DEVICE) {
5640 		int write_cnt = (cnt == 8) ? 0 : cnt;
5641 		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5642 
5643 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5644 			SPI_CONTROL_BYTECNT(write_cnt) |
5645 			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5646 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5647 		val64 |= SPI_CONTROL_REQ;
5648 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5649 		while (exit_cnt < 5) {
5650 			val64 = readq(&bar0->spi_control);
5651 			if (val64 & SPI_CONTROL_NACK) {
5652 				ret = 1;
5653 				break;
5654 			} else if (val64 & SPI_CONTROL_DONE) {
5655 				ret = 0;
5656 				break;
5657 			}
5658 			msleep(50);
5659 			exit_cnt++;
5660 		}
5661 	}
5662 	return ret;
5663 }
5664 static void s2io_vpd_read(struct s2io_nic *nic)
5665 {
5666 	u8 *vpd_data;
5667 	u8 data;
5668 	int i = 0, cnt, len, fail = 0;
5669 	int vpd_addr = 0x80;
5670 	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5671 
5672 	if (nic->device_type == XFRAME_II_DEVICE) {
5673 		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5674 		vpd_addr = 0x80;
5675 	} else {
5676 		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5677 		vpd_addr = 0x50;
5678 	}
5679 	strcpy(nic->serial_num, "NOT AVAILABLE");
5680 
5681 	vpd_data = kmalloc(256, GFP_KERNEL);
5682 	if (!vpd_data) {
5683 		swstats->mem_alloc_fail_cnt++;
5684 		return;
5685 	}
5686 	swstats->mem_allocated += 256;
5687 
5688 	for (i = 0; i < 256; i += 4) {
5689 		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5690 		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5691 		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5692 		for (cnt = 0; cnt < 5; cnt++) {
5693 			msleep(2);
5694 			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5695 			if (data == 0x80)
5696 				break;
5697 		}
5698 		if (cnt >= 5) {
5699 			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5700 			fail = 1;
5701 			break;
5702 		}
5703 		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5704 				      (u32 *)&vpd_data[i]);
5705 	}
5706 
5707 	if (!fail) {
5708 		/* read serial number of adapter */
5709 		for (cnt = 0; cnt < 252; cnt++) {
5710 			if ((vpd_data[cnt] == 'S') &&
5711 			    (vpd_data[cnt+1] == 'N')) {
5712 				len = vpd_data[cnt+2];
5713 				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5714 					memcpy(nic->serial_num,
5715 					       &vpd_data[cnt + 3],
5716 					       len);
5717 					memset(nic->serial_num+len,
5718 					       0,
5719 					       VPD_STRING_LEN-len);
5720 					break;
5721 				}
5722 			}
5723 		}
5724 	}
5725 
5726 	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5727 		len = vpd_data[1];
5728 		memcpy(nic->product_name, &vpd_data[3], len);
5729 		nic->product_name[len] = 0;
5730 	}
5731 	kfree(vpd_data);
5732 	swstats->mem_freed += 256;
5733 }
5734 
5735 /**
5736  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5737  *  @sp : private member of the device structure, which is a pointer to the
5738  *  s2io_nic structure.
5739  *  @eeprom : pointer to the user level structure provided by ethtool,
5740  *  containing all relevant information.
5741  *  @data_buf : user defined value to be written into Eeprom.
5742  *  Description: Reads the values stored in the Eeprom at given offset
5743  *  for a given length. Stores these values int the input argument data
5744  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5745  *  Return value:
5746  *  int  0 on success
5747  */
5748 
5749 static int s2io_ethtool_geeprom(struct net_device *dev,
5750 				struct ethtool_eeprom *eeprom, u8 * data_buf)
5751 {
5752 	u32 i, valid;
5753 	u64 data;
5754 	struct s2io_nic *sp = netdev_priv(dev);
5755 
5756 	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5757 
5758 	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5759 		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5760 
5761 	for (i = 0; i < eeprom->len; i += 4) {
5762 		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5763 			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5764 			return -EFAULT;
5765 		}
5766 		valid = INV(data);
5767 		memcpy((data_buf + i), &valid, 4);
5768 	}
5769 	return 0;
5770 }
5771 
5772 /**
5773  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5774  *  @sp : private member of the device structure, which is a pointer to the
5775  *  s2io_nic structure.
5776  *  @eeprom : pointer to the user level structure provided by ethtool,
5777  *  containing all relevant information.
5778  *  @data_buf ; user defined value to be written into Eeprom.
5779  *  Description:
5780  *  Tries to write the user provided value in the Eeprom, at the offset
5781  *  given by the user.
5782  *  Return value:
5783  *  0 on success, -EFAULT on failure.
5784  */
5785 
5786 static int s2io_ethtool_seeprom(struct net_device *dev,
5787 				struct ethtool_eeprom *eeprom,
5788 				u8 *data_buf)
5789 {
5790 	int len = eeprom->len, cnt = 0;
5791 	u64 valid = 0, data;
5792 	struct s2io_nic *sp = netdev_priv(dev);
5793 
5794 	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5795 		DBG_PRINT(ERR_DBG,
5796 			  "ETHTOOL_WRITE_EEPROM Err: "
5797 			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
5798 			  (sp->pdev->vendor | (sp->pdev->device << 16)),
5799 			  eeprom->magic);
5800 		return -EFAULT;
5801 	}
5802 
5803 	while (len) {
5804 		data = (u32)data_buf[cnt] & 0x000000FF;
5805 		if (data)
5806 			valid = (u32)(data << 24);
5807 		else
5808 			valid = data;
5809 
5810 		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5811 			DBG_PRINT(ERR_DBG,
5812 				  "ETHTOOL_WRITE_EEPROM Err: "
5813 				  "Cannot write into the specified offset\n");
5814 			return -EFAULT;
5815 		}
5816 		cnt++;
5817 		len--;
5818 	}
5819 
5820 	return 0;
5821 }
5822 
5823 /**
5824  * s2io_register_test - reads and writes into all clock domains.
5825  * @sp : private member of the device structure, which is a pointer to the
5826  * s2io_nic structure.
5827  * @data : variable that returns the result of each of the test conducted b
5828  * by the driver.
5829  * Description:
5830  * Read and write into all clock domains. The NIC has 3 clock domains,
5831  * see that registers in all the three regions are accessible.
5832  * Return value:
5833  * 0 on success.
5834  */
5835 
5836 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5837 {
5838 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5839 	u64 val64 = 0, exp_val;
5840 	int fail = 0;
5841 
5842 	val64 = readq(&bar0->pif_rd_swapper_fb);
5843 	if (val64 != 0x123456789abcdefULL) {
5844 		fail = 1;
5845 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5846 	}
5847 
5848 	val64 = readq(&bar0->rmac_pause_cfg);
5849 	if (val64 != 0xc000ffff00000000ULL) {
5850 		fail = 1;
5851 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5852 	}
5853 
5854 	val64 = readq(&bar0->rx_queue_cfg);
5855 	if (sp->device_type == XFRAME_II_DEVICE)
5856 		exp_val = 0x0404040404040404ULL;
5857 	else
5858 		exp_val = 0x0808080808080808ULL;
5859 	if (val64 != exp_val) {
5860 		fail = 1;
5861 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5862 	}
5863 
5864 	val64 = readq(&bar0->xgxs_efifo_cfg);
5865 	if (val64 != 0x000000001923141EULL) {
5866 		fail = 1;
5867 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5868 	}
5869 
5870 	val64 = 0x5A5A5A5A5A5A5A5AULL;
5871 	writeq(val64, &bar0->xmsi_data);
5872 	val64 = readq(&bar0->xmsi_data);
5873 	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5874 		fail = 1;
5875 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5876 	}
5877 
5878 	val64 = 0xA5A5A5A5A5A5A5A5ULL;
5879 	writeq(val64, &bar0->xmsi_data);
5880 	val64 = readq(&bar0->xmsi_data);
5881 	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5882 		fail = 1;
5883 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5884 	}
5885 
5886 	*data = fail;
5887 	return fail;
5888 }
5889 
5890 /**
5891  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5892  * @sp : private member of the device structure, which is a pointer to the
5893  * s2io_nic structure.
5894  * @data:variable that returns the result of each of the test conducted by
5895  * the driver.
5896  * Description:
5897  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5898  * register.
5899  * Return value:
5900  * 0 on success.
5901  */
5902 
5903 static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5904 {
5905 	int fail = 0;
5906 	u64 ret_data, org_4F0, org_7F0;
5907 	u8 saved_4F0 = 0, saved_7F0 = 0;
5908 	struct net_device *dev = sp->dev;
5909 
5910 	/* Test Write Error at offset 0 */
5911 	/* Note that SPI interface allows write access to all areas
5912 	 * of EEPROM. Hence doing all negative testing only for Xframe I.
5913 	 */
5914 	if (sp->device_type == XFRAME_I_DEVICE)
5915 		if (!write_eeprom(sp, 0, 0, 3))
5916 			fail = 1;
5917 
5918 	/* Save current values at offsets 0x4F0 and 0x7F0 */
5919 	if (!read_eeprom(sp, 0x4F0, &org_4F0))
5920 		saved_4F0 = 1;
5921 	if (!read_eeprom(sp, 0x7F0, &org_7F0))
5922 		saved_7F0 = 1;
5923 
5924 	/* Test Write at offset 4f0 */
5925 	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5926 		fail = 1;
5927 	if (read_eeprom(sp, 0x4F0, &ret_data))
5928 		fail = 1;
5929 
5930 	if (ret_data != 0x012345) {
5931 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5932 			  "Data written %llx Data read %llx\n",
5933 			  dev->name, (unsigned long long)0x12345,
5934 			  (unsigned long long)ret_data);
5935 		fail = 1;
5936 	}
5937 
5938 	/* Reset the EEPROM data go FFFF */
5939 	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5940 
5941 	/* Test Write Request Error at offset 0x7c */
5942 	if (sp->device_type == XFRAME_I_DEVICE)
5943 		if (!write_eeprom(sp, 0x07C, 0, 3))
5944 			fail = 1;
5945 
5946 	/* Test Write Request at offset 0x7f0 */
5947 	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5948 		fail = 1;
5949 	if (read_eeprom(sp, 0x7F0, &ret_data))
5950 		fail = 1;
5951 
5952 	if (ret_data != 0x012345) {
5953 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5954 			  "Data written %llx Data read %llx\n",
5955 			  dev->name, (unsigned long long)0x12345,
5956 			  (unsigned long long)ret_data);
5957 		fail = 1;
5958 	}
5959 
5960 	/* Reset the EEPROM data go FFFF */
5961 	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5962 
5963 	if (sp->device_type == XFRAME_I_DEVICE) {
5964 		/* Test Write Error at offset 0x80 */
5965 		if (!write_eeprom(sp, 0x080, 0, 3))
5966 			fail = 1;
5967 
5968 		/* Test Write Error at offset 0xfc */
5969 		if (!write_eeprom(sp, 0x0FC, 0, 3))
5970 			fail = 1;
5971 
5972 		/* Test Write Error at offset 0x100 */
5973 		if (!write_eeprom(sp, 0x100, 0, 3))
5974 			fail = 1;
5975 
5976 		/* Test Write Error at offset 4ec */
5977 		if (!write_eeprom(sp, 0x4EC, 0, 3))
5978 			fail = 1;
5979 	}
5980 
5981 	/* Restore values at offsets 0x4F0 and 0x7F0 */
5982 	if (saved_4F0)
5983 		write_eeprom(sp, 0x4F0, org_4F0, 3);
5984 	if (saved_7F0)
5985 		write_eeprom(sp, 0x7F0, org_7F0, 3);
5986 
5987 	*data = fail;
5988 	return fail;
5989 }
5990 
5991 /**
5992  * s2io_bist_test - invokes the MemBist test of the card .
5993  * @sp : private member of the device structure, which is a pointer to the
5994  * s2io_nic structure.
5995  * @data:variable that returns the result of each of the test conducted by
5996  * the driver.
5997  * Description:
5998  * This invokes the MemBist test of the card. We give around
5999  * 2 secs time for the Test to complete. If it's still not complete
6000  * within this peiod, we consider that the test failed.
6001  * Return value:
6002  * 0 on success and -1 on failure.
6003  */
6004 
6005 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6006 {
6007 	u8 bist = 0;
6008 	int cnt = 0, ret = -1;
6009 
6010 	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6011 	bist |= PCI_BIST_START;
6012 	pci_write_config_word(sp->pdev, PCI_BIST, bist);
6013 
6014 	while (cnt < 20) {
6015 		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6016 		if (!(bist & PCI_BIST_START)) {
6017 			*data = (bist & PCI_BIST_CODE_MASK);
6018 			ret = 0;
6019 			break;
6020 		}
6021 		msleep(100);
6022 		cnt++;
6023 	}
6024 
6025 	return ret;
6026 }
6027 
6028 /**
6029  * s2io_link_test - verifies the link state of the nic
6030  * @sp ; private member of the device structure, which is a pointer to the
6031  * s2io_nic structure.
6032  * @data: variable that returns the result of each of the test conducted by
6033  * the driver.
6034  * Description:
6035  * The function verifies the link state of the NIC and updates the input
6036  * argument 'data' appropriately.
6037  * Return value:
6038  * 0 on success.
6039  */
6040 
6041 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6042 {
6043 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6044 	u64 val64;
6045 
6046 	val64 = readq(&bar0->adapter_status);
6047 	if (!(LINK_IS_UP(val64)))
6048 		*data = 1;
6049 	else
6050 		*data = 0;
6051 
6052 	return *data;
6053 }
6054 
6055 /**
6056  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6057  * @sp: private member of the device structure, which is a pointer to the
6058  * s2io_nic structure.
6059  * @data: variable that returns the result of each of the test
6060  * conducted by the driver.
6061  * Description:
6062  *  This is one of the offline test that tests the read and write
6063  *  access to the RldRam chip on the NIC.
6064  * Return value:
6065  *  0 on success.
6066  */
6067 
6068 static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6069 {
6070 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6071 	u64 val64;
6072 	int cnt, iteration = 0, test_fail = 0;
6073 
6074 	val64 = readq(&bar0->adapter_control);
6075 	val64 &= ~ADAPTER_ECC_EN;
6076 	writeq(val64, &bar0->adapter_control);
6077 
6078 	val64 = readq(&bar0->mc_rldram_test_ctrl);
6079 	val64 |= MC_RLDRAM_TEST_MODE;
6080 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6081 
6082 	val64 = readq(&bar0->mc_rldram_mrs);
6083 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6084 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6085 
6086 	val64 |= MC_RLDRAM_MRS_ENABLE;
6087 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6088 
6089 	while (iteration < 2) {
6090 		val64 = 0x55555555aaaa0000ULL;
6091 		if (iteration == 1)
6092 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6093 		writeq(val64, &bar0->mc_rldram_test_d0);
6094 
6095 		val64 = 0xaaaa5a5555550000ULL;
6096 		if (iteration == 1)
6097 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6098 		writeq(val64, &bar0->mc_rldram_test_d1);
6099 
6100 		val64 = 0x55aaaaaaaa5a0000ULL;
6101 		if (iteration == 1)
6102 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6103 		writeq(val64, &bar0->mc_rldram_test_d2);
6104 
6105 		val64 = (u64) (0x0000003ffffe0100ULL);
6106 		writeq(val64, &bar0->mc_rldram_test_add);
6107 
6108 		val64 = MC_RLDRAM_TEST_MODE |
6109 			MC_RLDRAM_TEST_WRITE |
6110 			MC_RLDRAM_TEST_GO;
6111 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6112 
6113 		for (cnt = 0; cnt < 5; cnt++) {
6114 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6115 			if (val64 & MC_RLDRAM_TEST_DONE)
6116 				break;
6117 			msleep(200);
6118 		}
6119 
6120 		if (cnt == 5)
6121 			break;
6122 
6123 		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6124 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6125 
6126 		for (cnt = 0; cnt < 5; cnt++) {
6127 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6128 			if (val64 & MC_RLDRAM_TEST_DONE)
6129 				break;
6130 			msleep(500);
6131 		}
6132 
6133 		if (cnt == 5)
6134 			break;
6135 
6136 		val64 = readq(&bar0->mc_rldram_test_ctrl);
6137 		if (!(val64 & MC_RLDRAM_TEST_PASS))
6138 			test_fail = 1;
6139 
6140 		iteration++;
6141 	}
6142 
6143 	*data = test_fail;
6144 
6145 	/* Bring the adapter out of test mode */
6146 	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6147 
6148 	return test_fail;
6149 }
6150 
6151 /**
6152  *  s2io_ethtool_test - conducts 6 tsets to determine the health of card.
6153  *  @sp : private member of the device structure, which is a pointer to the
6154  *  s2io_nic structure.
6155  *  @ethtest : pointer to a ethtool command specific structure that will be
6156  *  returned to the user.
6157  *  @data : variable that returns the result of each of the test
6158  * conducted by the driver.
6159  * Description:
6160  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6161  *  the health of the card.
6162  * Return value:
6163  *  void
6164  */
6165 
6166 static void s2io_ethtool_test(struct net_device *dev,
6167 			      struct ethtool_test *ethtest,
6168 			      uint64_t *data)
6169 {
6170 	struct s2io_nic *sp = netdev_priv(dev);
6171 	int orig_state = netif_running(sp->dev);
6172 
6173 	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6174 		/* Offline Tests. */
6175 		if (orig_state)
6176 			s2io_close(sp->dev);
6177 
6178 		if (s2io_register_test(sp, &data[0]))
6179 			ethtest->flags |= ETH_TEST_FL_FAILED;
6180 
6181 		s2io_reset(sp);
6182 
6183 		if (s2io_rldram_test(sp, &data[3]))
6184 			ethtest->flags |= ETH_TEST_FL_FAILED;
6185 
6186 		s2io_reset(sp);
6187 
6188 		if (s2io_eeprom_test(sp, &data[1]))
6189 			ethtest->flags |= ETH_TEST_FL_FAILED;
6190 
6191 		if (s2io_bist_test(sp, &data[4]))
6192 			ethtest->flags |= ETH_TEST_FL_FAILED;
6193 
6194 		if (orig_state)
6195 			s2io_open(sp->dev);
6196 
6197 		data[2] = 0;
6198 	} else {
6199 		/* Online Tests. */
6200 		if (!orig_state) {
6201 			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6202 				  dev->name);
6203 			data[0] = -1;
6204 			data[1] = -1;
6205 			data[2] = -1;
6206 			data[3] = -1;
6207 			data[4] = -1;
6208 		}
6209 
6210 		if (s2io_link_test(sp, &data[2]))
6211 			ethtest->flags |= ETH_TEST_FL_FAILED;
6212 
6213 		data[0] = 0;
6214 		data[1] = 0;
6215 		data[3] = 0;
6216 		data[4] = 0;
6217 	}
6218 }
6219 
6220 static void s2io_get_ethtool_stats(struct net_device *dev,
6221 				   struct ethtool_stats *estats,
6222 				   u64 *tmp_stats)
6223 {
6224 	int i = 0, k;
6225 	struct s2io_nic *sp = netdev_priv(dev);
6226 	struct stat_block *stats = sp->mac_control.stats_info;
6227 	struct swStat *swstats = &stats->sw_stat;
6228 	struct xpakStat *xstats = &stats->xpak_stat;
6229 
6230 	s2io_updt_stats(sp);
6231 	tmp_stats[i++] =
6232 		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
6233 		le32_to_cpu(stats->tmac_frms);
6234 	tmp_stats[i++] =
6235 		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6236 		le32_to_cpu(stats->tmac_data_octets);
6237 	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6238 	tmp_stats[i++] =
6239 		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6240 		le32_to_cpu(stats->tmac_mcst_frms);
6241 	tmp_stats[i++] =
6242 		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6243 		le32_to_cpu(stats->tmac_bcst_frms);
6244 	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6245 	tmp_stats[i++] =
6246 		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6247 		le32_to_cpu(stats->tmac_ttl_octets);
6248 	tmp_stats[i++] =
6249 		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6250 		le32_to_cpu(stats->tmac_ucst_frms);
6251 	tmp_stats[i++] =
6252 		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6253 		le32_to_cpu(stats->tmac_nucst_frms);
6254 	tmp_stats[i++] =
6255 		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6256 		le32_to_cpu(stats->tmac_any_err_frms);
6257 	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6258 	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6259 	tmp_stats[i++] =
6260 		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6261 		le32_to_cpu(stats->tmac_vld_ip);
6262 	tmp_stats[i++] =
6263 		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6264 		le32_to_cpu(stats->tmac_drop_ip);
6265 	tmp_stats[i++] =
6266 		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6267 		le32_to_cpu(stats->tmac_icmp);
6268 	tmp_stats[i++] =
6269 		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6270 		le32_to_cpu(stats->tmac_rst_tcp);
6271 	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6272 	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6273 		le32_to_cpu(stats->tmac_udp);
6274 	tmp_stats[i++] =
6275 		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6276 		le32_to_cpu(stats->rmac_vld_frms);
6277 	tmp_stats[i++] =
6278 		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6279 		le32_to_cpu(stats->rmac_data_octets);
6280 	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6281 	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6282 	tmp_stats[i++] =
6283 		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6284 		le32_to_cpu(stats->rmac_vld_mcst_frms);
6285 	tmp_stats[i++] =
6286 		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6287 		le32_to_cpu(stats->rmac_vld_bcst_frms);
6288 	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6289 	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6290 	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6291 	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6292 	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6293 	tmp_stats[i++] =
6294 		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6295 		le32_to_cpu(stats->rmac_ttl_octets);
6296 	tmp_stats[i++] =
6297 		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6298 		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
6299 	tmp_stats[i++] =
6300 		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6301 		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6302 	tmp_stats[i++] =
6303 		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6304 		le32_to_cpu(stats->rmac_discarded_frms);
6305 	tmp_stats[i++] =
6306 		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6307 		<< 32 | le32_to_cpu(stats->rmac_drop_events);
6308 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6309 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6310 	tmp_stats[i++] =
6311 		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6312 		le32_to_cpu(stats->rmac_usized_frms);
6313 	tmp_stats[i++] =
6314 		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6315 		le32_to_cpu(stats->rmac_osized_frms);
6316 	tmp_stats[i++] =
6317 		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6318 		le32_to_cpu(stats->rmac_frag_frms);
6319 	tmp_stats[i++] =
6320 		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6321 		le32_to_cpu(stats->rmac_jabber_frms);
6322 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6323 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6324 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6325 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6326 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6327 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6328 	tmp_stats[i++] =
6329 		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6330 		le32_to_cpu(stats->rmac_ip);
6331 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6332 	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6333 	tmp_stats[i++] =
6334 		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6335 		le32_to_cpu(stats->rmac_drop_ip);
6336 	tmp_stats[i++] =
6337 		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6338 		le32_to_cpu(stats->rmac_icmp);
6339 	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6340 	tmp_stats[i++] =
6341 		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6342 		le32_to_cpu(stats->rmac_udp);
6343 	tmp_stats[i++] =
6344 		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6345 		le32_to_cpu(stats->rmac_err_drp_udp);
6346 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6347 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6348 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6349 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6350 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6351 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6352 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6353 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6354 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6355 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6356 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6357 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6358 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6359 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6360 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6361 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6362 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6363 	tmp_stats[i++] =
6364 		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6365 		le32_to_cpu(stats->rmac_pause_cnt);
6366 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6367 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6368 	tmp_stats[i++] =
6369 		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6370 		le32_to_cpu(stats->rmac_accepted_ip);
6371 	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6372 	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6373 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6374 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6375 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6376 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6377 	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6378 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6379 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6380 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6381 	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6382 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6383 	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6384 	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6385 	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6386 	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6387 	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6388 	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6389 	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6390 
6391 	/* Enhanced statistics exist only for Hercules */
6392 	if (sp->device_type == XFRAME_II_DEVICE) {
6393 		tmp_stats[i++] =
6394 			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6395 		tmp_stats[i++] =
6396 			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6397 		tmp_stats[i++] =
6398 			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6399 		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6400 		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6401 		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6402 		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6403 		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6404 		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6405 		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6406 		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6407 		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6408 		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6409 		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6410 		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6411 		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6412 	}
6413 
6414 	tmp_stats[i++] = 0;
6415 	tmp_stats[i++] = swstats->single_ecc_errs;
6416 	tmp_stats[i++] = swstats->double_ecc_errs;
6417 	tmp_stats[i++] = swstats->parity_err_cnt;
6418 	tmp_stats[i++] = swstats->serious_err_cnt;
6419 	tmp_stats[i++] = swstats->soft_reset_cnt;
6420 	tmp_stats[i++] = swstats->fifo_full_cnt;
6421 	for (k = 0; k < MAX_RX_RINGS; k++)
6422 		tmp_stats[i++] = swstats->ring_full_cnt[k];
6423 	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6424 	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6425 	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6426 	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6427 	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6428 	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6429 	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6430 	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6431 	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6432 	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6433 	tmp_stats[i++] = xstats->warn_laser_output_power_high;
6434 	tmp_stats[i++] = xstats->warn_laser_output_power_low;
6435 	tmp_stats[i++] = swstats->clubbed_frms_cnt;
6436 	tmp_stats[i++] = swstats->sending_both;
6437 	tmp_stats[i++] = swstats->outof_sequence_pkts;
6438 	tmp_stats[i++] = swstats->flush_max_pkts;
6439 	if (swstats->num_aggregations) {
6440 		u64 tmp = swstats->sum_avg_pkts_aggregated;
6441 		int count = 0;
6442 		/*
6443 		 * Since 64-bit divide does not work on all platforms,
6444 		 * do repeated subtraction.
6445 		 */
6446 		while (tmp >= swstats->num_aggregations) {
6447 			tmp -= swstats->num_aggregations;
6448 			count++;
6449 		}
6450 		tmp_stats[i++] = count;
6451 	} else
6452 		tmp_stats[i++] = 0;
6453 	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6454 	tmp_stats[i++] = swstats->pci_map_fail_cnt;
6455 	tmp_stats[i++] = swstats->watchdog_timer_cnt;
6456 	tmp_stats[i++] = swstats->mem_allocated;
6457 	tmp_stats[i++] = swstats->mem_freed;
6458 	tmp_stats[i++] = swstats->link_up_cnt;
6459 	tmp_stats[i++] = swstats->link_down_cnt;
6460 	tmp_stats[i++] = swstats->link_up_time;
6461 	tmp_stats[i++] = swstats->link_down_time;
6462 
6463 	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6464 	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6465 	tmp_stats[i++] = swstats->tx_parity_err_cnt;
6466 	tmp_stats[i++] = swstats->tx_link_loss_cnt;
6467 	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6468 
6469 	tmp_stats[i++] = swstats->rx_parity_err_cnt;
6470 	tmp_stats[i++] = swstats->rx_abort_cnt;
6471 	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6472 	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6473 	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6474 	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6475 	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6476 	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6477 	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6478 	tmp_stats[i++] = swstats->tda_err_cnt;
6479 	tmp_stats[i++] = swstats->pfc_err_cnt;
6480 	tmp_stats[i++] = swstats->pcc_err_cnt;
6481 	tmp_stats[i++] = swstats->tti_err_cnt;
6482 	tmp_stats[i++] = swstats->tpa_err_cnt;
6483 	tmp_stats[i++] = swstats->sm_err_cnt;
6484 	tmp_stats[i++] = swstats->lso_err_cnt;
6485 	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6486 	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6487 	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6488 	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6489 	tmp_stats[i++] = swstats->rc_err_cnt;
6490 	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6491 	tmp_stats[i++] = swstats->rpa_err_cnt;
6492 	tmp_stats[i++] = swstats->rda_err_cnt;
6493 	tmp_stats[i++] = swstats->rti_err_cnt;
6494 	tmp_stats[i++] = swstats->mc_err_cnt;
6495 }
6496 
6497 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6498 {
6499 	return XENA_REG_SPACE;
6500 }
6501 
6502 
6503 static int s2io_get_eeprom_len(struct net_device *dev)
6504 {
6505 	return XENA_EEPROM_SPACE;
6506 }
6507 
6508 static int s2io_get_sset_count(struct net_device *dev, int sset)
6509 {
6510 	struct s2io_nic *sp = netdev_priv(dev);
6511 
6512 	switch (sset) {
6513 	case ETH_SS_TEST:
6514 		return S2IO_TEST_LEN;
6515 	case ETH_SS_STATS:
6516 		switch (sp->device_type) {
6517 		case XFRAME_I_DEVICE:
6518 			return XFRAME_I_STAT_LEN;
6519 		case XFRAME_II_DEVICE:
6520 			return XFRAME_II_STAT_LEN;
6521 		default:
6522 			return 0;
6523 		}
6524 	default:
6525 		return -EOPNOTSUPP;
6526 	}
6527 }
6528 
6529 static void s2io_ethtool_get_strings(struct net_device *dev,
6530 				     u32 stringset, u8 *data)
6531 {
6532 	int stat_size = 0;
6533 	struct s2io_nic *sp = netdev_priv(dev);
6534 
6535 	switch (stringset) {
6536 	case ETH_SS_TEST:
6537 		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6538 		break;
6539 	case ETH_SS_STATS:
6540 		stat_size = sizeof(ethtool_xena_stats_keys);
6541 		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6542 		if (sp->device_type == XFRAME_II_DEVICE) {
6543 			memcpy(data + stat_size,
6544 			       &ethtool_enhanced_stats_keys,
6545 			       sizeof(ethtool_enhanced_stats_keys));
6546 			stat_size += sizeof(ethtool_enhanced_stats_keys);
6547 		}
6548 
6549 		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6550 		       sizeof(ethtool_driver_stats_keys));
6551 	}
6552 }
6553 
6554 static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6555 {
6556 	struct s2io_nic *sp = netdev_priv(dev);
6557 	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6558 
6559 	if (changed && netif_running(dev)) {
6560 		int rc;
6561 
6562 		s2io_stop_all_tx_queue(sp);
6563 		s2io_card_down(sp);
6564 		dev->features = features;
6565 		rc = s2io_card_up(sp);
6566 		if (rc)
6567 			s2io_reset(sp);
6568 		else
6569 			s2io_start_all_tx_queue(sp);
6570 
6571 		return rc ? rc : 1;
6572 	}
6573 
6574 	return 0;
6575 }
6576 
6577 static const struct ethtool_ops netdev_ethtool_ops = {
6578 	.get_drvinfo = s2io_ethtool_gdrvinfo,
6579 	.get_regs_len = s2io_ethtool_get_regs_len,
6580 	.get_regs = s2io_ethtool_gregs,
6581 	.get_link = ethtool_op_get_link,
6582 	.get_eeprom_len = s2io_get_eeprom_len,
6583 	.get_eeprom = s2io_ethtool_geeprom,
6584 	.set_eeprom = s2io_ethtool_seeprom,
6585 	.get_ringparam = s2io_ethtool_gringparam,
6586 	.get_pauseparam = s2io_ethtool_getpause_data,
6587 	.set_pauseparam = s2io_ethtool_setpause_data,
6588 	.self_test = s2io_ethtool_test,
6589 	.get_strings = s2io_ethtool_get_strings,
6590 	.set_phys_id = s2io_ethtool_set_led,
6591 	.get_ethtool_stats = s2io_get_ethtool_stats,
6592 	.get_sset_count = s2io_get_sset_count,
6593 	.get_link_ksettings = s2io_ethtool_get_link_ksettings,
6594 	.set_link_ksettings = s2io_ethtool_set_link_ksettings,
6595 };
6596 
6597 /**
6598  *  s2io_ioctl - Entry point for the Ioctl
6599  *  @dev :  Device pointer.
6600  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6601  *  a proprietary structure used to pass information to the driver.
6602  *  @cmd :  This is used to distinguish between the different commands that
6603  *  can be passed to the IOCTL functions.
6604  *  Description:
6605  *  Currently there are no special functionality supported in IOCTL, hence
6606  *  function always return EOPNOTSUPPORTED
6607  */
6608 
6609 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6610 {
6611 	return -EOPNOTSUPP;
6612 }
6613 
6614 /**
6615  *  s2io_change_mtu - entry point to change MTU size for the device.
6616  *   @dev : device pointer.
6617  *   @new_mtu : the new MTU size for the device.
6618  *   Description: A driver entry point to change MTU size for the device.
6619  *   Before changing the MTU the device must be stopped.
6620  *  Return value:
6621  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6622  *   file on failure.
6623  */
6624 
6625 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6626 {
6627 	struct s2io_nic *sp = netdev_priv(dev);
6628 	int ret = 0;
6629 
6630 	dev->mtu = new_mtu;
6631 	if (netif_running(dev)) {
6632 		s2io_stop_all_tx_queue(sp);
6633 		s2io_card_down(sp);
6634 		ret = s2io_card_up(sp);
6635 		if (ret) {
6636 			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6637 				  __func__);
6638 			return ret;
6639 		}
6640 		s2io_wake_all_tx_queue(sp);
6641 	} else { /* Device is down */
6642 		struct XENA_dev_config __iomem *bar0 = sp->bar0;
6643 		u64 val64 = new_mtu;
6644 
6645 		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6646 	}
6647 
6648 	return ret;
6649 }
6650 
6651 /**
6652  * s2io_set_link - Set the LInk status
6653  * @data: long pointer to device private structue
6654  * Description: Sets the link status for the adapter
6655  */
6656 
6657 static void s2io_set_link(struct work_struct *work)
6658 {
6659 	struct s2io_nic *nic = container_of(work, struct s2io_nic,
6660 					    set_link_task);
6661 	struct net_device *dev = nic->dev;
6662 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
6663 	register u64 val64;
6664 	u16 subid;
6665 
6666 	rtnl_lock();
6667 
6668 	if (!netif_running(dev))
6669 		goto out_unlock;
6670 
6671 	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6672 		/* The card is being reset, no point doing anything */
6673 		goto out_unlock;
6674 	}
6675 
6676 	subid = nic->pdev->subsystem_device;
6677 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6678 		/*
6679 		 * Allow a small delay for the NICs self initiated
6680 		 * cleanup to complete.
6681 		 */
6682 		msleep(100);
6683 	}
6684 
6685 	val64 = readq(&bar0->adapter_status);
6686 	if (LINK_IS_UP(val64)) {
6687 		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6688 			if (verify_xena_quiescence(nic)) {
6689 				val64 = readq(&bar0->adapter_control);
6690 				val64 |= ADAPTER_CNTL_EN;
6691 				writeq(val64, &bar0->adapter_control);
6692 				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6693 					    nic->device_type, subid)) {
6694 					val64 = readq(&bar0->gpio_control);
6695 					val64 |= GPIO_CTRL_GPIO_0;
6696 					writeq(val64, &bar0->gpio_control);
6697 					val64 = readq(&bar0->gpio_control);
6698 				} else {
6699 					val64 |= ADAPTER_LED_ON;
6700 					writeq(val64, &bar0->adapter_control);
6701 				}
6702 				nic->device_enabled_once = true;
6703 			} else {
6704 				DBG_PRINT(ERR_DBG,
6705 					  "%s: Error: device is not Quiescent\n",
6706 					  dev->name);
6707 				s2io_stop_all_tx_queue(nic);
6708 			}
6709 		}
6710 		val64 = readq(&bar0->adapter_control);
6711 		val64 |= ADAPTER_LED_ON;
6712 		writeq(val64, &bar0->adapter_control);
6713 		s2io_link(nic, LINK_UP);
6714 	} else {
6715 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6716 						      subid)) {
6717 			val64 = readq(&bar0->gpio_control);
6718 			val64 &= ~GPIO_CTRL_GPIO_0;
6719 			writeq(val64, &bar0->gpio_control);
6720 			val64 = readq(&bar0->gpio_control);
6721 		}
6722 		/* turn off LED */
6723 		val64 = readq(&bar0->adapter_control);
6724 		val64 = val64 & (~ADAPTER_LED_ON);
6725 		writeq(val64, &bar0->adapter_control);
6726 		s2io_link(nic, LINK_DOWN);
6727 	}
6728 	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6729 
6730 out_unlock:
6731 	rtnl_unlock();
6732 }
6733 
6734 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6735 				  struct buffAdd *ba,
6736 				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
6737 				  u64 *temp2, int size)
6738 {
6739 	struct net_device *dev = sp->dev;
6740 	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6741 
6742 	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6743 		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6744 		/* allocate skb */
6745 		if (*skb) {
6746 			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6747 			/*
6748 			 * As Rx frame are not going to be processed,
6749 			 * using same mapped address for the Rxd
6750 			 * buffer pointer
6751 			 */
6752 			rxdp1->Buffer0_ptr = *temp0;
6753 		} else {
6754 			*skb = netdev_alloc_skb(dev, size);
6755 			if (!(*skb)) {
6756 				DBG_PRINT(INFO_DBG,
6757 					  "%s: Out of memory to allocate %s\n",
6758 					  dev->name, "1 buf mode SKBs");
6759 				stats->mem_alloc_fail_cnt++;
6760 				return -ENOMEM ;
6761 			}
6762 			stats->mem_allocated += (*skb)->truesize;
6763 			/* storing the mapped addr in a temp variable
6764 			 * such it will be used for next rxd whose
6765 			 * Host Control is NULL
6766 			 */
6767 			rxdp1->Buffer0_ptr = *temp0 =
6768 				dma_map_single(&sp->pdev->dev, (*skb)->data,
6769 					       size - NET_IP_ALIGN,
6770 					       DMA_FROM_DEVICE);
6771 			if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr))
6772 				goto memalloc_failed;
6773 			rxdp->Host_Control = (unsigned long) (*skb);
6774 		}
6775 	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6776 		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6777 		/* Two buffer Mode */
6778 		if (*skb) {
6779 			rxdp3->Buffer2_ptr = *temp2;
6780 			rxdp3->Buffer0_ptr = *temp0;
6781 			rxdp3->Buffer1_ptr = *temp1;
6782 		} else {
6783 			*skb = netdev_alloc_skb(dev, size);
6784 			if (!(*skb)) {
6785 				DBG_PRINT(INFO_DBG,
6786 					  "%s: Out of memory to allocate %s\n",
6787 					  dev->name,
6788 					  "2 buf mode SKBs");
6789 				stats->mem_alloc_fail_cnt++;
6790 				return -ENOMEM;
6791 			}
6792 			stats->mem_allocated += (*skb)->truesize;
6793 			rxdp3->Buffer2_ptr = *temp2 =
6794 				dma_map_single(&sp->pdev->dev, (*skb)->data,
6795 					       dev->mtu + 4, DMA_FROM_DEVICE);
6796 			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr))
6797 				goto memalloc_failed;
6798 			rxdp3->Buffer0_ptr = *temp0 =
6799 				dma_map_single(&sp->pdev->dev, ba->ba_0,
6800 					       BUF0_LEN, DMA_FROM_DEVICE);
6801 			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) {
6802 				dma_unmap_single(&sp->pdev->dev,
6803 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6804 						 dev->mtu + 4,
6805 						 DMA_FROM_DEVICE);
6806 				goto memalloc_failed;
6807 			}
6808 			rxdp->Host_Control = (unsigned long) (*skb);
6809 
6810 			/* Buffer-1 will be dummy buffer not used */
6811 			rxdp3->Buffer1_ptr = *temp1 =
6812 				dma_map_single(&sp->pdev->dev, ba->ba_1,
6813 					       BUF1_LEN, DMA_FROM_DEVICE);
6814 			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) {
6815 				dma_unmap_single(&sp->pdev->dev,
6816 						 (dma_addr_t)rxdp3->Buffer0_ptr,
6817 						 BUF0_LEN, DMA_FROM_DEVICE);
6818 				dma_unmap_single(&sp->pdev->dev,
6819 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6820 						 dev->mtu + 4,
6821 						 DMA_FROM_DEVICE);
6822 				goto memalloc_failed;
6823 			}
6824 		}
6825 	}
6826 	return 0;
6827 
6828 memalloc_failed:
6829 	stats->pci_map_fail_cnt++;
6830 	stats->mem_freed += (*skb)->truesize;
6831 	dev_kfree_skb(*skb);
6832 	return -ENOMEM;
6833 }
6834 
6835 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6836 				int size)
6837 {
6838 	struct net_device *dev = sp->dev;
6839 	if (sp->rxd_mode == RXD_MODE_1) {
6840 		rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6841 	} else if (sp->rxd_mode == RXD_MODE_3B) {
6842 		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6843 		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6844 		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6845 	}
6846 }
6847 
6848 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6849 {
6850 	int i, j, k, blk_cnt = 0, size;
6851 	struct config_param *config = &sp->config;
6852 	struct mac_info *mac_control = &sp->mac_control;
6853 	struct net_device *dev = sp->dev;
6854 	struct RxD_t *rxdp = NULL;
6855 	struct sk_buff *skb = NULL;
6856 	struct buffAdd *ba = NULL;
6857 	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6858 
6859 	/* Calculate the size based on ring mode */
6860 	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6861 		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6862 	if (sp->rxd_mode == RXD_MODE_1)
6863 		size += NET_IP_ALIGN;
6864 	else if (sp->rxd_mode == RXD_MODE_3B)
6865 		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6866 
6867 	for (i = 0; i < config->rx_ring_num; i++) {
6868 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6869 		struct ring_info *ring = &mac_control->rings[i];
6870 
6871 		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6872 
6873 		for (j = 0; j < blk_cnt; j++) {
6874 			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6875 				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6876 				if (sp->rxd_mode == RXD_MODE_3B)
6877 					ba = &ring->ba[j][k];
6878 				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6879 							   &temp0_64,
6880 							   &temp1_64,
6881 							   &temp2_64,
6882 							   size) == -ENOMEM) {
6883 					return 0;
6884 				}
6885 
6886 				set_rxd_buffer_size(sp, rxdp, size);
6887 				dma_wmb();
6888 				/* flip the Ownership bit to Hardware */
6889 				rxdp->Control_1 |= RXD_OWN_XENA;
6890 			}
6891 		}
6892 	}
6893 	return 0;
6894 
6895 }
6896 
6897 static int s2io_add_isr(struct s2io_nic *sp)
6898 {
6899 	int ret = 0;
6900 	struct net_device *dev = sp->dev;
6901 	int err = 0;
6902 
6903 	if (sp->config.intr_type == MSI_X)
6904 		ret = s2io_enable_msi_x(sp);
6905 	if (ret) {
6906 		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6907 		sp->config.intr_type = INTA;
6908 	}
6909 
6910 	/*
6911 	 * Store the values of the MSIX table in
6912 	 * the struct s2io_nic structure
6913 	 */
6914 	store_xmsi_data(sp);
6915 
6916 	/* After proper initialization of H/W, register ISR */
6917 	if (sp->config.intr_type == MSI_X) {
6918 		int i, msix_rx_cnt = 0;
6919 
6920 		for (i = 0; i < sp->num_entries; i++) {
6921 			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6922 				if (sp->s2io_entries[i].type ==
6923 				    MSIX_RING_TYPE) {
6924 					snprintf(sp->desc[i],
6925 						sizeof(sp->desc[i]),
6926 						"%s:MSI-X-%d-RX",
6927 						dev->name, i);
6928 					err = request_irq(sp->entries[i].vector,
6929 							  s2io_msix_ring_handle,
6930 							  0,
6931 							  sp->desc[i],
6932 							  sp->s2io_entries[i].arg);
6933 				} else if (sp->s2io_entries[i].type ==
6934 					   MSIX_ALARM_TYPE) {
6935 					snprintf(sp->desc[i],
6936 						sizeof(sp->desc[i]),
6937 						"%s:MSI-X-%d-TX",
6938 						dev->name, i);
6939 					err = request_irq(sp->entries[i].vector,
6940 							  s2io_msix_fifo_handle,
6941 							  0,
6942 							  sp->desc[i],
6943 							  sp->s2io_entries[i].arg);
6944 
6945 				}
6946 				/* if either data or addr is zero print it. */
6947 				if (!(sp->msix_info[i].addr &&
6948 				      sp->msix_info[i].data)) {
6949 					DBG_PRINT(ERR_DBG,
6950 						  "%s @Addr:0x%llx Data:0x%llx\n",
6951 						  sp->desc[i],
6952 						  (unsigned long long)
6953 						  sp->msix_info[i].addr,
6954 						  (unsigned long long)
6955 						  ntohl(sp->msix_info[i].data));
6956 				} else
6957 					msix_rx_cnt++;
6958 				if (err) {
6959 					remove_msix_isr(sp);
6960 
6961 					DBG_PRINT(ERR_DBG,
6962 						  "%s:MSI-X-%d registration "
6963 						  "failed\n", dev->name, i);
6964 
6965 					DBG_PRINT(ERR_DBG,
6966 						  "%s: Defaulting to INTA\n",
6967 						  dev->name);
6968 					sp->config.intr_type = INTA;
6969 					break;
6970 				}
6971 				sp->s2io_entries[i].in_use =
6972 					MSIX_REGISTERED_SUCCESS;
6973 			}
6974 		}
6975 		if (!err) {
6976 			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
6977 			DBG_PRINT(INFO_DBG,
6978 				  "MSI-X-TX entries enabled through alarm vector\n");
6979 		}
6980 	}
6981 	if (sp->config.intr_type == INTA) {
6982 		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
6983 				  sp->name, dev);
6984 		if (err) {
6985 			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6986 				  dev->name);
6987 			return -1;
6988 		}
6989 	}
6990 	return 0;
6991 }
6992 
6993 static void s2io_rem_isr(struct s2io_nic *sp)
6994 {
6995 	if (sp->config.intr_type == MSI_X)
6996 		remove_msix_isr(sp);
6997 	else
6998 		remove_inta_isr(sp);
6999 }
7000 
7001 static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7002 {
7003 	int cnt = 0;
7004 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
7005 	register u64 val64 = 0;
7006 	struct config_param *config;
7007 	config = &sp->config;
7008 
7009 	if (!is_s2io_card_up(sp))
7010 		return;
7011 
7012 	del_timer_sync(&sp->alarm_timer);
7013 	/* If s2io_set_link task is executing, wait till it completes. */
7014 	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7015 		msleep(50);
7016 	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7017 
7018 	/* Disable napi */
7019 	if (sp->config.napi) {
7020 		int off = 0;
7021 		if (config->intr_type ==  MSI_X) {
7022 			for (; off < sp->config.rx_ring_num; off++)
7023 				napi_disable(&sp->mac_control.rings[off].napi);
7024 		}
7025 		else
7026 			napi_disable(&sp->napi);
7027 	}
7028 
7029 	/* disable Tx and Rx traffic on the NIC */
7030 	if (do_io)
7031 		stop_nic(sp);
7032 
7033 	s2io_rem_isr(sp);
7034 
7035 	/* stop the tx queue, indicate link down */
7036 	s2io_link(sp, LINK_DOWN);
7037 
7038 	/* Check if the device is Quiescent and then Reset the NIC */
7039 	while (do_io) {
7040 		/* As per the HW requirement we need to replenish the
7041 		 * receive buffer to avoid the ring bump. Since there is
7042 		 * no intention of processing the Rx frame at this pointwe are
7043 		 * just setting the ownership bit of rxd in Each Rx
7044 		 * ring to HW and set the appropriate buffer size
7045 		 * based on the ring mode
7046 		 */
7047 		rxd_owner_bit_reset(sp);
7048 
7049 		val64 = readq(&bar0->adapter_status);
7050 		if (verify_xena_quiescence(sp)) {
7051 			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7052 				break;
7053 		}
7054 
7055 		msleep(50);
7056 		cnt++;
7057 		if (cnt == 10) {
7058 			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7059 				  "adapter status reads 0x%llx\n",
7060 				  (unsigned long long)val64);
7061 			break;
7062 		}
7063 	}
7064 	if (do_io)
7065 		s2io_reset(sp);
7066 
7067 	/* Free all Tx buffers */
7068 	free_tx_buffers(sp);
7069 
7070 	/* Free all Rx buffers */
7071 	free_rx_buffers(sp);
7072 
7073 	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7074 }
7075 
7076 static void s2io_card_down(struct s2io_nic *sp)
7077 {
7078 	do_s2io_card_down(sp, 1);
7079 }
7080 
7081 static int s2io_card_up(struct s2io_nic *sp)
7082 {
7083 	int i, ret = 0;
7084 	struct config_param *config;
7085 	struct mac_info *mac_control;
7086 	struct net_device *dev = sp->dev;
7087 	u16 interruptible;
7088 
7089 	/* Initialize the H/W I/O registers */
7090 	ret = init_nic(sp);
7091 	if (ret != 0) {
7092 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7093 			  dev->name);
7094 		if (ret != -EIO)
7095 			s2io_reset(sp);
7096 		return ret;
7097 	}
7098 
7099 	/*
7100 	 * Initializing the Rx buffers. For now we are considering only 1
7101 	 * Rx ring and initializing buffers into 30 Rx blocks
7102 	 */
7103 	config = &sp->config;
7104 	mac_control = &sp->mac_control;
7105 
7106 	for (i = 0; i < config->rx_ring_num; i++) {
7107 		struct ring_info *ring = &mac_control->rings[i];
7108 
7109 		ring->mtu = dev->mtu;
7110 		ring->lro = !!(dev->features & NETIF_F_LRO);
7111 		ret = fill_rx_buffers(sp, ring, 1);
7112 		if (ret) {
7113 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7114 				  dev->name);
7115 			s2io_reset(sp);
7116 			free_rx_buffers(sp);
7117 			return -ENOMEM;
7118 		}
7119 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7120 			  ring->rx_bufs_left);
7121 	}
7122 
7123 	/* Initialise napi */
7124 	if (config->napi) {
7125 		if (config->intr_type ==  MSI_X) {
7126 			for (i = 0; i < sp->config.rx_ring_num; i++)
7127 				napi_enable(&sp->mac_control.rings[i].napi);
7128 		} else {
7129 			napi_enable(&sp->napi);
7130 		}
7131 	}
7132 
7133 	/* Maintain the state prior to the open */
7134 	if (sp->promisc_flg)
7135 		sp->promisc_flg = 0;
7136 	if (sp->m_cast_flg) {
7137 		sp->m_cast_flg = 0;
7138 		sp->all_multi_pos = 0;
7139 	}
7140 
7141 	/* Setting its receive mode */
7142 	s2io_set_multicast(dev);
7143 
7144 	if (dev->features & NETIF_F_LRO) {
7145 		/* Initialize max aggregatable pkts per session based on MTU */
7146 		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7147 		/* Check if we can use (if specified) user provided value */
7148 		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7149 			sp->lro_max_aggr_per_sess = lro_max_pkts;
7150 	}
7151 
7152 	/* Enable Rx Traffic and interrupts on the NIC */
7153 	if (start_nic(sp)) {
7154 		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7155 		s2io_reset(sp);
7156 		free_rx_buffers(sp);
7157 		return -ENODEV;
7158 	}
7159 
7160 	/* Add interrupt service routine */
7161 	if (s2io_add_isr(sp) != 0) {
7162 		if (sp->config.intr_type == MSI_X)
7163 			s2io_rem_isr(sp);
7164 		s2io_reset(sp);
7165 		free_rx_buffers(sp);
7166 		return -ENODEV;
7167 	}
7168 
7169 	timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
7170 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
7171 
7172 	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7173 
7174 	/*  Enable select interrupts */
7175 	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7176 	if (sp->config.intr_type != INTA) {
7177 		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7178 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7179 	} else {
7180 		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7181 		interruptible |= TX_PIC_INTR;
7182 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7183 	}
7184 
7185 	return 0;
7186 }
7187 
7188 /**
7189  * s2io_restart_nic - Resets the NIC.
7190  * @data : long pointer to the device private structure
7191  * Description:
7192  * This function is scheduled to be run by the s2io_tx_watchdog
7193  * function after 0.5 secs to reset the NIC. The idea is to reduce
7194  * the run time of the watch dog routine which is run holding a
7195  * spin lock.
7196  */
7197 
7198 static void s2io_restart_nic(struct work_struct *work)
7199 {
7200 	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7201 	struct net_device *dev = sp->dev;
7202 
7203 	rtnl_lock();
7204 
7205 	if (!netif_running(dev))
7206 		goto out_unlock;
7207 
7208 	s2io_card_down(sp);
7209 	if (s2io_card_up(sp)) {
7210 		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7211 	}
7212 	s2io_wake_all_tx_queue(sp);
7213 	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7214 out_unlock:
7215 	rtnl_unlock();
7216 }
7217 
7218 /**
7219  *  s2io_tx_watchdog - Watchdog for transmit side.
7220  *  @dev : Pointer to net device structure
7221  *  Description:
7222  *  This function is triggered if the Tx Queue is stopped
7223  *  for a pre-defined amount of time when the Interface is still up.
7224  *  If the Interface is jammed in such a situation, the hardware is
7225  *  reset (by s2io_close) and restarted again (by s2io_open) to
7226  *  overcome any problem that might have been caused in the hardware.
7227  *  Return value:
7228  *  void
7229  */
7230 
7231 static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
7232 {
7233 	struct s2io_nic *sp = netdev_priv(dev);
7234 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7235 
7236 	if (netif_carrier_ok(dev)) {
7237 		swstats->watchdog_timer_cnt++;
7238 		schedule_work(&sp->rst_timer_task);
7239 		swstats->soft_reset_cnt++;
7240 	}
7241 }
7242 
7243 /**
7244  *   rx_osm_handler - To perform some OS related operations on SKB.
7245  *   @sp: private member of the device structure,pointer to s2io_nic structure.
7246  *   @skb : the socket buffer pointer.
7247  *   @len : length of the packet
7248  *   @cksum : FCS checksum of the frame.
7249  *   @ring_no : the ring from which this RxD was extracted.
7250  *   Description:
7251  *   This function is called by the Rx interrupt serivce routine to perform
7252  *   some OS related operations on the SKB before passing it to the upper
7253  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7254  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7255  *   to the upper layer. If the checksum is wrong, it increments the Rx
7256  *   packet error count, frees the SKB and returns error.
7257  *   Return value:
7258  *   SUCCESS on success and -1 on failure.
7259  */
7260 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7261 {
7262 	struct s2io_nic *sp = ring_data->nic;
7263 	struct net_device *dev = ring_data->dev;
7264 	struct sk_buff *skb = (struct sk_buff *)
7265 		((unsigned long)rxdp->Host_Control);
7266 	int ring_no = ring_data->ring_no;
7267 	u16 l3_csum, l4_csum;
7268 	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7269 	struct lro *lro;
7270 	u8 err_mask;
7271 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7272 
7273 	skb->dev = dev;
7274 
7275 	if (err) {
7276 		/* Check for parity error */
7277 		if (err & 0x1)
7278 			swstats->parity_err_cnt++;
7279 
7280 		err_mask = err >> 48;
7281 		switch (err_mask) {
7282 		case 1:
7283 			swstats->rx_parity_err_cnt++;
7284 			break;
7285 
7286 		case 2:
7287 			swstats->rx_abort_cnt++;
7288 			break;
7289 
7290 		case 3:
7291 			swstats->rx_parity_abort_cnt++;
7292 			break;
7293 
7294 		case 4:
7295 			swstats->rx_rda_fail_cnt++;
7296 			break;
7297 
7298 		case 5:
7299 			swstats->rx_unkn_prot_cnt++;
7300 			break;
7301 
7302 		case 6:
7303 			swstats->rx_fcs_err_cnt++;
7304 			break;
7305 
7306 		case 7:
7307 			swstats->rx_buf_size_err_cnt++;
7308 			break;
7309 
7310 		case 8:
7311 			swstats->rx_rxd_corrupt_cnt++;
7312 			break;
7313 
7314 		case 15:
7315 			swstats->rx_unkn_err_cnt++;
7316 			break;
7317 		}
7318 		/*
7319 		 * Drop the packet if bad transfer code. Exception being
7320 		 * 0x5, which could be due to unsupported IPv6 extension header.
7321 		 * In this case, we let stack handle the packet.
7322 		 * Note that in this case, since checksum will be incorrect,
7323 		 * stack will validate the same.
7324 		 */
7325 		if (err_mask != 0x5) {
7326 			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7327 				  dev->name, err_mask);
7328 			dev->stats.rx_crc_errors++;
7329 			swstats->mem_freed
7330 				+= skb->truesize;
7331 			dev_kfree_skb(skb);
7332 			ring_data->rx_bufs_left -= 1;
7333 			rxdp->Host_Control = 0;
7334 			return 0;
7335 		}
7336 	}
7337 
7338 	rxdp->Host_Control = 0;
7339 	if (sp->rxd_mode == RXD_MODE_1) {
7340 		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7341 
7342 		skb_put(skb, len);
7343 	} else if (sp->rxd_mode == RXD_MODE_3B) {
7344 		int get_block = ring_data->rx_curr_get_info.block_index;
7345 		int get_off = ring_data->rx_curr_get_info.offset;
7346 		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7347 		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7348 		unsigned char *buff = skb_push(skb, buf0_len);
7349 
7350 		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7351 		memcpy(buff, ba->ba_0, buf0_len);
7352 		skb_put(skb, buf2_len);
7353 	}
7354 
7355 	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7356 	    ((!ring_data->lro) ||
7357 	     (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
7358 	    (dev->features & NETIF_F_RXCSUM)) {
7359 		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7360 		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7361 		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7362 			/*
7363 			 * NIC verifies if the Checksum of the received
7364 			 * frame is Ok or not and accordingly returns
7365 			 * a flag in the RxD.
7366 			 */
7367 			skb->ip_summed = CHECKSUM_UNNECESSARY;
7368 			if (ring_data->lro) {
7369 				u32 tcp_len = 0;
7370 				u8 *tcp;
7371 				int ret = 0;
7372 
7373 				ret = s2io_club_tcp_session(ring_data,
7374 							    skb->data, &tcp,
7375 							    &tcp_len, &lro,
7376 							    rxdp, sp);
7377 				switch (ret) {
7378 				case 3: /* Begin anew */
7379 					lro->parent = skb;
7380 					goto aggregate;
7381 				case 1: /* Aggregate */
7382 					lro_append_pkt(sp, lro, skb, tcp_len);
7383 					goto aggregate;
7384 				case 4: /* Flush session */
7385 					lro_append_pkt(sp, lro, skb, tcp_len);
7386 					queue_rx_frame(lro->parent,
7387 						       lro->vlan_tag);
7388 					clear_lro_session(lro);
7389 					swstats->flush_max_pkts++;
7390 					goto aggregate;
7391 				case 2: /* Flush both */
7392 					lro->parent->data_len = lro->frags_len;
7393 					swstats->sending_both++;
7394 					queue_rx_frame(lro->parent,
7395 						       lro->vlan_tag);
7396 					clear_lro_session(lro);
7397 					goto send_up;
7398 				case 0: /* sessions exceeded */
7399 				case -1: /* non-TCP or not L2 aggregatable */
7400 				case 5: /*
7401 					 * First pkt in session not
7402 					 * L3/L4 aggregatable
7403 					 */
7404 					break;
7405 				default:
7406 					DBG_PRINT(ERR_DBG,
7407 						  "%s: Samadhana!!\n",
7408 						  __func__);
7409 					BUG();
7410 				}
7411 			}
7412 		} else {
7413 			/*
7414 			 * Packet with erroneous checksum, let the
7415 			 * upper layers deal with it.
7416 			 */
7417 			skb_checksum_none_assert(skb);
7418 		}
7419 	} else
7420 		skb_checksum_none_assert(skb);
7421 
7422 	swstats->mem_freed += skb->truesize;
7423 send_up:
7424 	skb_record_rx_queue(skb, ring_no);
7425 	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7426 aggregate:
7427 	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7428 	return SUCCESS;
7429 }
7430 
7431 /**
7432  *  s2io_link - stops/starts the Tx queue.
7433  *  @sp : private member of the device structure, which is a pointer to the
7434  *  s2io_nic structure.
7435  *  @link : inidicates whether link is UP/DOWN.
7436  *  Description:
7437  *  This function stops/starts the Tx queue depending on whether the link
7438  *  status of the NIC is is down or up. This is called by the Alarm
7439  *  interrupt handler whenever a link change interrupt comes up.
7440  *  Return value:
7441  *  void.
7442  */
7443 
7444 static void s2io_link(struct s2io_nic *sp, int link)
7445 {
7446 	struct net_device *dev = sp->dev;
7447 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7448 
7449 	if (link != sp->last_link_state) {
7450 		init_tti(sp, link);
7451 		if (link == LINK_DOWN) {
7452 			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7453 			s2io_stop_all_tx_queue(sp);
7454 			netif_carrier_off(dev);
7455 			if (swstats->link_up_cnt)
7456 				swstats->link_up_time =
7457 					jiffies - sp->start_time;
7458 			swstats->link_down_cnt++;
7459 		} else {
7460 			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7461 			if (swstats->link_down_cnt)
7462 				swstats->link_down_time =
7463 					jiffies - sp->start_time;
7464 			swstats->link_up_cnt++;
7465 			netif_carrier_on(dev);
7466 			s2io_wake_all_tx_queue(sp);
7467 		}
7468 	}
7469 	sp->last_link_state = link;
7470 	sp->start_time = jiffies;
7471 }
7472 
7473 /**
7474  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7475  *  @sp : private member of the device structure, which is a pointer to the
7476  *  s2io_nic structure.
7477  *  Description:
7478  *  This function initializes a few of the PCI and PCI-X configuration registers
7479  *  with recommended values.
7480  *  Return value:
7481  *  void
7482  */
7483 
7484 static void s2io_init_pci(struct s2io_nic *sp)
7485 {
7486 	u16 pci_cmd = 0, pcix_cmd = 0;
7487 
7488 	/* Enable Data Parity Error Recovery in PCI-X command register. */
7489 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7490 			     &(pcix_cmd));
7491 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7492 			      (pcix_cmd | 1));
7493 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7494 			     &(pcix_cmd));
7495 
7496 	/* Set the PErr Response bit in PCI command register. */
7497 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7498 	pci_write_config_word(sp->pdev, PCI_COMMAND,
7499 			      (pci_cmd | PCI_COMMAND_PARITY));
7500 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7501 }
7502 
7503 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7504 			    u8 *dev_multiq)
7505 {
7506 	int i;
7507 
7508 	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7509 		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7510 			  "(%d) not supported\n", tx_fifo_num);
7511 
7512 		if (tx_fifo_num < 1)
7513 			tx_fifo_num = 1;
7514 		else
7515 			tx_fifo_num = MAX_TX_FIFOS;
7516 
7517 		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7518 	}
7519 
7520 	if (multiq)
7521 		*dev_multiq = multiq;
7522 
7523 	if (tx_steering_type && (1 == tx_fifo_num)) {
7524 		if (tx_steering_type != TX_DEFAULT_STEERING)
7525 			DBG_PRINT(ERR_DBG,
7526 				  "Tx steering is not supported with "
7527 				  "one fifo. Disabling Tx steering.\n");
7528 		tx_steering_type = NO_STEERING;
7529 	}
7530 
7531 	if ((tx_steering_type < NO_STEERING) ||
7532 	    (tx_steering_type > TX_DEFAULT_STEERING)) {
7533 		DBG_PRINT(ERR_DBG,
7534 			  "Requested transmit steering not supported\n");
7535 		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7536 		tx_steering_type = NO_STEERING;
7537 	}
7538 
7539 	if (rx_ring_num > MAX_RX_RINGS) {
7540 		DBG_PRINT(ERR_DBG,
7541 			  "Requested number of rx rings not supported\n");
7542 		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7543 			  MAX_RX_RINGS);
7544 		rx_ring_num = MAX_RX_RINGS;
7545 	}
7546 
7547 	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7548 		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7549 			  "Defaulting to INTA\n");
7550 		*dev_intr_type = INTA;
7551 	}
7552 
7553 	if ((*dev_intr_type == MSI_X) &&
7554 	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7555 	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7556 		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7557 			  "Defaulting to INTA\n");
7558 		*dev_intr_type = INTA;
7559 	}
7560 
7561 	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7562 		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7563 		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7564 		rx_ring_mode = 1;
7565 	}
7566 
7567 	for (i = 0; i < MAX_RX_RINGS; i++)
7568 		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7569 			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7570 				  "supported\nDefaulting to %d\n",
7571 				  MAX_RX_BLOCKS_PER_RING);
7572 			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7573 		}
7574 
7575 	return SUCCESS;
7576 }
7577 
7578 /**
7579  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7580  * or Traffic class respectively.
7581  * @nic: device private variable
7582  * Description: The function configures the receive steering to
7583  * desired receive ring.
7584  * Return Value:  SUCCESS on success and
7585  * '-1' on failure (endian settings incorrect).
7586  */
7587 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7588 {
7589 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
7590 	register u64 val64 = 0;
7591 
7592 	if (ds_codepoint > 63)
7593 		return FAILURE;
7594 
7595 	val64 = RTS_DS_MEM_DATA(ring);
7596 	writeq(val64, &bar0->rts_ds_mem_data);
7597 
7598 	val64 = RTS_DS_MEM_CTRL_WE |
7599 		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7600 		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7601 
7602 	writeq(val64, &bar0->rts_ds_mem_ctrl);
7603 
7604 	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7605 				     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7606 				     S2IO_BIT_RESET);
7607 }
7608 
7609 static const struct net_device_ops s2io_netdev_ops = {
7610 	.ndo_open	        = s2io_open,
7611 	.ndo_stop	        = s2io_close,
7612 	.ndo_get_stats	        = s2io_get_stats,
7613 	.ndo_start_xmit    	= s2io_xmit,
7614 	.ndo_validate_addr	= eth_validate_addr,
7615 	.ndo_set_rx_mode	= s2io_set_multicast,
7616 	.ndo_do_ioctl	   	= s2io_ioctl,
7617 	.ndo_set_mac_address    = s2io_set_mac_addr,
7618 	.ndo_change_mtu	   	= s2io_change_mtu,
7619 	.ndo_set_features	= s2io_set_features,
7620 	.ndo_tx_timeout	   	= s2io_tx_watchdog,
7621 #ifdef CONFIG_NET_POLL_CONTROLLER
7622 	.ndo_poll_controller    = s2io_netpoll,
7623 #endif
7624 };
7625 
7626 /**
7627  *  s2io_init_nic - Initialization of the adapter .
7628  *  @pdev : structure containing the PCI related information of the device.
7629  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7630  *  Description:
7631  *  The function initializes an adapter identified by the pci_dec structure.
7632  *  All OS related initialization including memory and device structure and
7633  *  initlaization of the device private variable is done. Also the swapper
7634  *  control register is initialized to enable read and write into the I/O
7635  *  registers of the device.
7636  *  Return value:
7637  *  returns 0 on success and negative on failure.
7638  */
7639 
7640 static int
7641 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7642 {
7643 	struct s2io_nic *sp;
7644 	struct net_device *dev;
7645 	int i, j, ret;
7646 	int dma_flag = false;
7647 	u32 mac_up, mac_down;
7648 	u64 val64 = 0, tmp64 = 0;
7649 	struct XENA_dev_config __iomem *bar0 = NULL;
7650 	u16 subid;
7651 	struct config_param *config;
7652 	struct mac_info *mac_control;
7653 	int mode;
7654 	u8 dev_intr_type = intr_type;
7655 	u8 dev_multiq = 0;
7656 
7657 	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7658 	if (ret)
7659 		return ret;
7660 
7661 	ret = pci_enable_device(pdev);
7662 	if (ret) {
7663 		DBG_PRINT(ERR_DBG,
7664 			  "%s: pci_enable_device failed\n", __func__);
7665 		return ret;
7666 	}
7667 
7668 	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7669 		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7670 		dma_flag = true;
7671 		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7672 			DBG_PRINT(ERR_DBG,
7673 				  "Unable to obtain 64bit DMA for coherent allocations\n");
7674 			pci_disable_device(pdev);
7675 			return -ENOMEM;
7676 		}
7677 	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
7678 		DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7679 	} else {
7680 		pci_disable_device(pdev);
7681 		return -ENOMEM;
7682 	}
7683 	ret = pci_request_regions(pdev, s2io_driver_name);
7684 	if (ret) {
7685 		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7686 			  __func__, ret);
7687 		pci_disable_device(pdev);
7688 		return -ENODEV;
7689 	}
7690 	if (dev_multiq)
7691 		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7692 	else
7693 		dev = alloc_etherdev(sizeof(struct s2io_nic));
7694 	if (dev == NULL) {
7695 		pci_disable_device(pdev);
7696 		pci_release_regions(pdev);
7697 		return -ENODEV;
7698 	}
7699 
7700 	pci_set_master(pdev);
7701 	pci_set_drvdata(pdev, dev);
7702 	SET_NETDEV_DEV(dev, &pdev->dev);
7703 
7704 	/*  Private member variable initialized to s2io NIC structure */
7705 	sp = netdev_priv(dev);
7706 	sp->dev = dev;
7707 	sp->pdev = pdev;
7708 	sp->high_dma_flag = dma_flag;
7709 	sp->device_enabled_once = false;
7710 	if (rx_ring_mode == 1)
7711 		sp->rxd_mode = RXD_MODE_1;
7712 	if (rx_ring_mode == 2)
7713 		sp->rxd_mode = RXD_MODE_3B;
7714 
7715 	sp->config.intr_type = dev_intr_type;
7716 
7717 	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7718 	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7719 		sp->device_type = XFRAME_II_DEVICE;
7720 	else
7721 		sp->device_type = XFRAME_I_DEVICE;
7722 
7723 
7724 	/* Initialize some PCI/PCI-X fields of the NIC. */
7725 	s2io_init_pci(sp);
7726 
7727 	/*
7728 	 * Setting the device configuration parameters.
7729 	 * Most of these parameters can be specified by the user during
7730 	 * module insertion as they are module loadable parameters. If
7731 	 * these parameters are not not specified during load time, they
7732 	 * are initialized with default values.
7733 	 */
7734 	config = &sp->config;
7735 	mac_control = &sp->mac_control;
7736 
7737 	config->napi = napi;
7738 	config->tx_steering_type = tx_steering_type;
7739 
7740 	/* Tx side parameters. */
7741 	if (config->tx_steering_type == TX_PRIORITY_STEERING)
7742 		config->tx_fifo_num = MAX_TX_FIFOS;
7743 	else
7744 		config->tx_fifo_num = tx_fifo_num;
7745 
7746 	/* Initialize the fifos used for tx steering */
7747 	if (config->tx_fifo_num < 5) {
7748 		if (config->tx_fifo_num  == 1)
7749 			sp->total_tcp_fifos = 1;
7750 		else
7751 			sp->total_tcp_fifos = config->tx_fifo_num - 1;
7752 		sp->udp_fifo_idx = config->tx_fifo_num - 1;
7753 		sp->total_udp_fifos = 1;
7754 		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7755 	} else {
7756 		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7757 				       FIFO_OTHER_MAX_NUM);
7758 		sp->udp_fifo_idx = sp->total_tcp_fifos;
7759 		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7760 		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7761 	}
7762 
7763 	config->multiq = dev_multiq;
7764 	for (i = 0; i < config->tx_fifo_num; i++) {
7765 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7766 
7767 		tx_cfg->fifo_len = tx_fifo_len[i];
7768 		tx_cfg->fifo_priority = i;
7769 	}
7770 
7771 	/* mapping the QoS priority to the configured fifos */
7772 	for (i = 0; i < MAX_TX_FIFOS; i++)
7773 		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7774 
7775 	/* map the hashing selector table to the configured fifos */
7776 	for (i = 0; i < config->tx_fifo_num; i++)
7777 		sp->fifo_selector[i] = fifo_selector[i];
7778 
7779 
7780 	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7781 	for (i = 0; i < config->tx_fifo_num; i++) {
7782 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7783 
7784 		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7785 		if (tx_cfg->fifo_len < 65) {
7786 			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7787 			break;
7788 		}
7789 	}
7790 	/* + 2 because one Txd for skb->data and one Txd for UFO */
7791 	config->max_txds = MAX_SKB_FRAGS + 2;
7792 
7793 	/* Rx side parameters. */
7794 	config->rx_ring_num = rx_ring_num;
7795 	for (i = 0; i < config->rx_ring_num; i++) {
7796 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7797 		struct ring_info *ring = &mac_control->rings[i];
7798 
7799 		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7800 		rx_cfg->ring_priority = i;
7801 		ring->rx_bufs_left = 0;
7802 		ring->rxd_mode = sp->rxd_mode;
7803 		ring->rxd_count = rxd_count[sp->rxd_mode];
7804 		ring->pdev = sp->pdev;
7805 		ring->dev = sp->dev;
7806 	}
7807 
7808 	for (i = 0; i < rx_ring_num; i++) {
7809 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7810 
7811 		rx_cfg->ring_org = RING_ORG_BUFF1;
7812 		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7813 	}
7814 
7815 	/*  Setting Mac Control parameters */
7816 	mac_control->rmac_pause_time = rmac_pause_time;
7817 	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7818 	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7819 
7820 
7821 	/*  initialize the shared memory used by the NIC and the host */
7822 	if (init_shared_mem(sp)) {
7823 		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7824 		ret = -ENOMEM;
7825 		goto mem_alloc_failed;
7826 	}
7827 
7828 	sp->bar0 = pci_ioremap_bar(pdev, 0);
7829 	if (!sp->bar0) {
7830 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7831 			  dev->name);
7832 		ret = -ENOMEM;
7833 		goto bar0_remap_failed;
7834 	}
7835 
7836 	sp->bar1 = pci_ioremap_bar(pdev, 2);
7837 	if (!sp->bar1) {
7838 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7839 			  dev->name);
7840 		ret = -ENOMEM;
7841 		goto bar1_remap_failed;
7842 	}
7843 
7844 	/* Initializing the BAR1 address as the start of the FIFO pointer. */
7845 	for (j = 0; j < MAX_TX_FIFOS; j++) {
7846 		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7847 	}
7848 
7849 	/*  Driver entry points */
7850 	dev->netdev_ops = &s2io_netdev_ops;
7851 	dev->ethtool_ops = &netdev_ethtool_ops;
7852 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7853 		NETIF_F_TSO | NETIF_F_TSO6 |
7854 		NETIF_F_RXCSUM | NETIF_F_LRO;
7855 	dev->features |= dev->hw_features |
7856 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
7857 	if (sp->high_dma_flag == true)
7858 		dev->features |= NETIF_F_HIGHDMA;
7859 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7860 	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7861 	INIT_WORK(&sp->set_link_task, s2io_set_link);
7862 
7863 	pci_save_state(sp->pdev);
7864 
7865 	/* Setting swapper control on the NIC, for proper reset operation */
7866 	if (s2io_set_swapper(sp)) {
7867 		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7868 			  dev->name);
7869 		ret = -EAGAIN;
7870 		goto set_swap_failed;
7871 	}
7872 
7873 	/* Verify if the Herc works on the slot its placed into */
7874 	if (sp->device_type & XFRAME_II_DEVICE) {
7875 		mode = s2io_verify_pci_mode(sp);
7876 		if (mode < 0) {
7877 			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7878 				  __func__);
7879 			ret = -EBADSLT;
7880 			goto set_swap_failed;
7881 		}
7882 	}
7883 
7884 	if (sp->config.intr_type == MSI_X) {
7885 		sp->num_entries = config->rx_ring_num + 1;
7886 		ret = s2io_enable_msi_x(sp);
7887 
7888 		if (!ret) {
7889 			ret = s2io_test_msi(sp);
7890 			/* rollback MSI-X, will re-enable during add_isr() */
7891 			remove_msix_isr(sp);
7892 		}
7893 		if (ret) {
7894 
7895 			DBG_PRINT(ERR_DBG,
7896 				  "MSI-X requested but failed to enable\n");
7897 			sp->config.intr_type = INTA;
7898 		}
7899 	}
7900 
7901 	if (config->intr_type ==  MSI_X) {
7902 		for (i = 0; i < config->rx_ring_num ; i++) {
7903 			struct ring_info *ring = &mac_control->rings[i];
7904 
7905 			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7906 		}
7907 	} else {
7908 		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7909 	}
7910 
7911 	/* Not needed for Herc */
7912 	if (sp->device_type & XFRAME_I_DEVICE) {
7913 		/*
7914 		 * Fix for all "FFs" MAC address problems observed on
7915 		 * Alpha platforms
7916 		 */
7917 		fix_mac_address(sp);
7918 		s2io_reset(sp);
7919 	}
7920 
7921 	/*
7922 	 * MAC address initialization.
7923 	 * For now only one mac address will be read and used.
7924 	 */
7925 	bar0 = sp->bar0;
7926 	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7927 		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7928 	writeq(val64, &bar0->rmac_addr_cmd_mem);
7929 	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7930 			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
7931 			      S2IO_BIT_RESET);
7932 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
7933 	mac_down = (u32)tmp64;
7934 	mac_up = (u32) (tmp64 >> 32);
7935 
7936 	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7937 	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7938 	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7939 	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7940 	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7941 	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7942 
7943 	/*  Set the factory defined MAC address initially   */
7944 	dev->addr_len = ETH_ALEN;
7945 	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7946 
7947 	/* initialize number of multicast & unicast MAC entries variables */
7948 	if (sp->device_type == XFRAME_I_DEVICE) {
7949 		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7950 		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7951 		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7952 	} else if (sp->device_type == XFRAME_II_DEVICE) {
7953 		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7954 		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7955 		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7956 	}
7957 
7958 	/* MTU range: 46 - 9600 */
7959 	dev->min_mtu = MIN_MTU;
7960 	dev->max_mtu = S2IO_JUMBO_SIZE;
7961 
7962 	/* store mac addresses from CAM to s2io_nic structure */
7963 	do_s2io_store_unicast_mc(sp);
7964 
7965 	/* Configure MSIX vector for number of rings configured plus one */
7966 	if ((sp->device_type == XFRAME_II_DEVICE) &&
7967 	    (config->intr_type == MSI_X))
7968 		sp->num_entries = config->rx_ring_num + 1;
7969 
7970 	/* Store the values of the MSIX table in the s2io_nic structure */
7971 	store_xmsi_data(sp);
7972 	/* reset Nic and bring it to known state */
7973 	s2io_reset(sp);
7974 
7975 	/*
7976 	 * Initialize link state flags
7977 	 * and the card state parameter
7978 	 */
7979 	sp->state = 0;
7980 
7981 	/* Initialize spinlocks */
7982 	for (i = 0; i < sp->config.tx_fifo_num; i++) {
7983 		struct fifo_info *fifo = &mac_control->fifos[i];
7984 
7985 		spin_lock_init(&fifo->tx_lock);
7986 	}
7987 
7988 	/*
7989 	 * SXE-002: Configure link and activity LED to init state
7990 	 * on driver load.
7991 	 */
7992 	subid = sp->pdev->subsystem_device;
7993 	if ((subid & 0xFF) >= 0x07) {
7994 		val64 = readq(&bar0->gpio_control);
7995 		val64 |= 0x0000800000000000ULL;
7996 		writeq(val64, &bar0->gpio_control);
7997 		val64 = 0x0411040400000000ULL;
7998 		writeq(val64, (void __iomem *)bar0 + 0x2700);
7999 		val64 = readq(&bar0->gpio_control);
8000 	}
8001 
8002 	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
8003 
8004 	if (register_netdev(dev)) {
8005 		DBG_PRINT(ERR_DBG, "Device registration failed\n");
8006 		ret = -ENODEV;
8007 		goto register_failed;
8008 	}
8009 	s2io_vpd_read(sp);
8010 	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8011 	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8012 		  sp->product_name, pdev->revision);
8013 	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8014 		  s2io_driver_version);
8015 	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8016 	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8017 	if (sp->device_type & XFRAME_II_DEVICE) {
8018 		mode = s2io_print_pci_mode(sp);
8019 		if (mode < 0) {
8020 			ret = -EBADSLT;
8021 			unregister_netdev(dev);
8022 			goto set_swap_failed;
8023 		}
8024 	}
8025 	switch (sp->rxd_mode) {
8026 	case RXD_MODE_1:
8027 		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8028 			  dev->name);
8029 		break;
8030 	case RXD_MODE_3B:
8031 		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8032 			  dev->name);
8033 		break;
8034 	}
8035 
8036 	switch (sp->config.napi) {
8037 	case 0:
8038 		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8039 		break;
8040 	case 1:
8041 		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8042 		break;
8043 	}
8044 
8045 	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8046 		  sp->config.tx_fifo_num);
8047 
8048 	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8049 		  sp->config.rx_ring_num);
8050 
8051 	switch (sp->config.intr_type) {
8052 	case INTA:
8053 		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8054 		break;
8055 	case MSI_X:
8056 		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8057 		break;
8058 	}
8059 	if (sp->config.multiq) {
8060 		for (i = 0; i < sp->config.tx_fifo_num; i++) {
8061 			struct fifo_info *fifo = &mac_control->fifos[i];
8062 
8063 			fifo->multiq = config->multiq;
8064 		}
8065 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8066 			  dev->name);
8067 	} else
8068 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8069 			  dev->name);
8070 
8071 	switch (sp->config.tx_steering_type) {
8072 	case NO_STEERING:
8073 		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8074 			  dev->name);
8075 		break;
8076 	case TX_PRIORITY_STEERING:
8077 		DBG_PRINT(ERR_DBG,
8078 			  "%s: Priority steering enabled for transmit\n",
8079 			  dev->name);
8080 		break;
8081 	case TX_DEFAULT_STEERING:
8082 		DBG_PRINT(ERR_DBG,
8083 			  "%s: Default steering enabled for transmit\n",
8084 			  dev->name);
8085 	}
8086 
8087 	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8088 		  dev->name);
8089 	/* Initialize device name */
8090 	snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8091 		 sp->product_name);
8092 
8093 	if (vlan_tag_strip)
8094 		sp->vlan_strip_flag = 1;
8095 	else
8096 		sp->vlan_strip_flag = 0;
8097 
8098 	/*
8099 	 * Make Link state as off at this point, when the Link change
8100 	 * interrupt comes the state will be automatically changed to
8101 	 * the right state.
8102 	 */
8103 	netif_carrier_off(dev);
8104 
8105 	return 0;
8106 
8107 register_failed:
8108 set_swap_failed:
8109 	iounmap(sp->bar1);
8110 bar1_remap_failed:
8111 	iounmap(sp->bar0);
8112 bar0_remap_failed:
8113 mem_alloc_failed:
8114 	free_shared_mem(sp);
8115 	pci_disable_device(pdev);
8116 	pci_release_regions(pdev);
8117 	free_netdev(dev);
8118 
8119 	return ret;
8120 }
8121 
8122 /**
8123  * s2io_rem_nic - Free the PCI device
8124  * @pdev: structure containing the PCI related information of the device.
8125  * Description: This function is called by the Pci subsystem to release a
8126  * PCI device and free up all resource held up by the device. This could
8127  * be in response to a Hot plug event or when the driver is to be removed
8128  * from memory.
8129  */
8130 
8131 static void s2io_rem_nic(struct pci_dev *pdev)
8132 {
8133 	struct net_device *dev = pci_get_drvdata(pdev);
8134 	struct s2io_nic *sp;
8135 
8136 	if (dev == NULL) {
8137 		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8138 		return;
8139 	}
8140 
8141 	sp = netdev_priv(dev);
8142 
8143 	cancel_work_sync(&sp->rst_timer_task);
8144 	cancel_work_sync(&sp->set_link_task);
8145 
8146 	unregister_netdev(dev);
8147 
8148 	free_shared_mem(sp);
8149 	iounmap(sp->bar0);
8150 	iounmap(sp->bar1);
8151 	pci_release_regions(pdev);
8152 	free_netdev(dev);
8153 	pci_disable_device(pdev);
8154 }
8155 
8156 module_pci_driver(s2io_driver);
8157 
8158 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8159 				struct tcphdr **tcp, struct RxD_t *rxdp,
8160 				struct s2io_nic *sp)
8161 {
8162 	int ip_off;
8163 	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8164 
8165 	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8166 		DBG_PRINT(INIT_DBG,
8167 			  "%s: Non-TCP frames not supported for LRO\n",
8168 			  __func__);
8169 		return -1;
8170 	}
8171 
8172 	/* Checking for DIX type or DIX type with VLAN */
8173 	if ((l2_type == 0) || (l2_type == 4)) {
8174 		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8175 		/*
8176 		 * If vlan stripping is disabled and the frame is VLAN tagged,
8177 		 * shift the offset by the VLAN header size bytes.
8178 		 */
8179 		if ((!sp->vlan_strip_flag) &&
8180 		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8181 			ip_off += HEADER_VLAN_SIZE;
8182 	} else {
8183 		/* LLC, SNAP etc are considered non-mergeable */
8184 		return -1;
8185 	}
8186 
8187 	*ip = (struct iphdr *)(buffer + ip_off);
8188 	ip_len = (u8)((*ip)->ihl);
8189 	ip_len <<= 2;
8190 	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8191 
8192 	return 0;
8193 }
8194 
8195 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8196 				  struct tcphdr *tcp)
8197 {
8198 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8199 	if ((lro->iph->saddr != ip->saddr) ||
8200 	    (lro->iph->daddr != ip->daddr) ||
8201 	    (lro->tcph->source != tcp->source) ||
8202 	    (lro->tcph->dest != tcp->dest))
8203 		return -1;
8204 	return 0;
8205 }
8206 
8207 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8208 {
8209 	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8210 }
8211 
8212 static void initiate_new_session(struct lro *lro, u8 *l2h,
8213 				 struct iphdr *ip, struct tcphdr *tcp,
8214 				 u32 tcp_pyld_len, u16 vlan_tag)
8215 {
8216 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8217 	lro->l2h = l2h;
8218 	lro->iph = ip;
8219 	lro->tcph = tcp;
8220 	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8221 	lro->tcp_ack = tcp->ack_seq;
8222 	lro->sg_num = 1;
8223 	lro->total_len = ntohs(ip->tot_len);
8224 	lro->frags_len = 0;
8225 	lro->vlan_tag = vlan_tag;
8226 	/*
8227 	 * Check if we saw TCP timestamp.
8228 	 * Other consistency checks have already been done.
8229 	 */
8230 	if (tcp->doff == 8) {
8231 		__be32 *ptr;
8232 		ptr = (__be32 *)(tcp+1);
8233 		lro->saw_ts = 1;
8234 		lro->cur_tsval = ntohl(*(ptr+1));
8235 		lro->cur_tsecr = *(ptr+2);
8236 	}
8237 	lro->in_use = 1;
8238 }
8239 
8240 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8241 {
8242 	struct iphdr *ip = lro->iph;
8243 	struct tcphdr *tcp = lro->tcph;
8244 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8245 
8246 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8247 
8248 	/* Update L3 header */
8249 	csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8250 	ip->tot_len = htons(lro->total_len);
8251 
8252 	/* Update L4 header */
8253 	tcp->ack_seq = lro->tcp_ack;
8254 	tcp->window = lro->window;
8255 
8256 	/* Update tsecr field if this session has timestamps enabled */
8257 	if (lro->saw_ts) {
8258 		__be32 *ptr = (__be32 *)(tcp + 1);
8259 		*(ptr+2) = lro->cur_tsecr;
8260 	}
8261 
8262 	/* Update counters required for calculation of
8263 	 * average no. of packets aggregated.
8264 	 */
8265 	swstats->sum_avg_pkts_aggregated += lro->sg_num;
8266 	swstats->num_aggregations++;
8267 }
8268 
8269 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8270 			     struct tcphdr *tcp, u32 l4_pyld)
8271 {
8272 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8273 	lro->total_len += l4_pyld;
8274 	lro->frags_len += l4_pyld;
8275 	lro->tcp_next_seq += l4_pyld;
8276 	lro->sg_num++;
8277 
8278 	/* Update ack seq no. and window ad(from this pkt) in LRO object */
8279 	lro->tcp_ack = tcp->ack_seq;
8280 	lro->window = tcp->window;
8281 
8282 	if (lro->saw_ts) {
8283 		__be32 *ptr;
8284 		/* Update tsecr and tsval from this packet */
8285 		ptr = (__be32 *)(tcp+1);
8286 		lro->cur_tsval = ntohl(*(ptr+1));
8287 		lro->cur_tsecr = *(ptr + 2);
8288 	}
8289 }
8290 
8291 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8292 				    struct tcphdr *tcp, u32 tcp_pyld_len)
8293 {
8294 	u8 *ptr;
8295 
8296 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8297 
8298 	if (!tcp_pyld_len) {
8299 		/* Runt frame or a pure ack */
8300 		return -1;
8301 	}
8302 
8303 	if (ip->ihl != 5) /* IP has options */
8304 		return -1;
8305 
8306 	/* If we see CE codepoint in IP header, packet is not mergeable */
8307 	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8308 		return -1;
8309 
8310 	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8311 	if (tcp->urg || tcp->psh || tcp->rst ||
8312 	    tcp->syn || tcp->fin ||
8313 	    tcp->ece || tcp->cwr || !tcp->ack) {
8314 		/*
8315 		 * Currently recognize only the ack control word and
8316 		 * any other control field being set would result in
8317 		 * flushing the LRO session
8318 		 */
8319 		return -1;
8320 	}
8321 
8322 	/*
8323 	 * Allow only one TCP timestamp option. Don't aggregate if
8324 	 * any other options are detected.
8325 	 */
8326 	if (tcp->doff != 5 && tcp->doff != 8)
8327 		return -1;
8328 
8329 	if (tcp->doff == 8) {
8330 		ptr = (u8 *)(tcp + 1);
8331 		while (*ptr == TCPOPT_NOP)
8332 			ptr++;
8333 		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8334 			return -1;
8335 
8336 		/* Ensure timestamp value increases monotonically */
8337 		if (l_lro)
8338 			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8339 				return -1;
8340 
8341 		/* timestamp echo reply should be non-zero */
8342 		if (*((__be32 *)(ptr+6)) == 0)
8343 			return -1;
8344 	}
8345 
8346 	return 0;
8347 }
8348 
8349 static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
8350 				 u8 **tcp, u32 *tcp_len, struct lro **lro,
8351 				 struct RxD_t *rxdp, struct s2io_nic *sp)
8352 {
8353 	struct iphdr *ip;
8354 	struct tcphdr *tcph;
8355 	int ret = 0, i;
8356 	u16 vlan_tag = 0;
8357 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8358 
8359 	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8360 				   rxdp, sp);
8361 	if (ret)
8362 		return ret;
8363 
8364 	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
8365 
8366 	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8367 	tcph = (struct tcphdr *)*tcp;
8368 	*tcp_len = get_l4_pyld_length(ip, tcph);
8369 	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8370 		struct lro *l_lro = &ring_data->lro0_n[i];
8371 		if (l_lro->in_use) {
8372 			if (check_for_socket_match(l_lro, ip, tcph))
8373 				continue;
8374 			/* Sock pair matched */
8375 			*lro = l_lro;
8376 
8377 			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8378 				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
8379 					  "expected 0x%x, actual 0x%x\n",
8380 					  __func__,
8381 					  (*lro)->tcp_next_seq,
8382 					  ntohl(tcph->seq));
8383 
8384 				swstats->outof_sequence_pkts++;
8385 				ret = 2;
8386 				break;
8387 			}
8388 
8389 			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8390 						      *tcp_len))
8391 				ret = 1; /* Aggregate */
8392 			else
8393 				ret = 2; /* Flush both */
8394 			break;
8395 		}
8396 	}
8397 
8398 	if (ret == 0) {
8399 		/* Before searching for available LRO objects,
8400 		 * check if the pkt is L3/L4 aggregatable. If not
8401 		 * don't create new LRO session. Just send this
8402 		 * packet up.
8403 		 */
8404 		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
8405 			return 5;
8406 
8407 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8408 			struct lro *l_lro = &ring_data->lro0_n[i];
8409 			if (!(l_lro->in_use)) {
8410 				*lro = l_lro;
8411 				ret = 3; /* Begin anew */
8412 				break;
8413 			}
8414 		}
8415 	}
8416 
8417 	if (ret == 0) { /* sessions exceeded */
8418 		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8419 			  __func__);
8420 		*lro = NULL;
8421 		return ret;
8422 	}
8423 
8424 	switch (ret) {
8425 	case 3:
8426 		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8427 				     vlan_tag);
8428 		break;
8429 	case 2:
8430 		update_L3L4_header(sp, *lro);
8431 		break;
8432 	case 1:
8433 		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8434 		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8435 			update_L3L4_header(sp, *lro);
8436 			ret = 4; /* Flush the LRO */
8437 		}
8438 		break;
8439 	default:
8440 		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
8441 		break;
8442 	}
8443 
8444 	return ret;
8445 }
8446 
8447 static void clear_lro_session(struct lro *lro)
8448 {
8449 	static u16 lro_struct_size = sizeof(struct lro);
8450 
8451 	memset(lro, 0, lro_struct_size);
8452 }
8453 
8454 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8455 {
8456 	struct net_device *dev = skb->dev;
8457 	struct s2io_nic *sp = netdev_priv(dev);
8458 
8459 	skb->protocol = eth_type_trans(skb, dev);
8460 	if (vlan_tag && sp->vlan_strip_flag)
8461 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8462 	if (sp->config.napi)
8463 		netif_receive_skb(skb);
8464 	else
8465 		netif_rx(skb);
8466 }
8467 
8468 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8469 			   struct sk_buff *skb, u32 tcp_len)
8470 {
8471 	struct sk_buff *first = lro->parent;
8472 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8473 
8474 	first->len += tcp_len;
8475 	first->data_len = lro->frags_len;
8476 	skb_pull(skb, (skb->len - tcp_len));
8477 	if (skb_shinfo(first)->frag_list)
8478 		lro->last_frag->next = skb;
8479 	else
8480 		skb_shinfo(first)->frag_list = skb;
8481 	first->truesize += skb->truesize;
8482 	lro->last_frag = skb;
8483 	swstats->clubbed_frms_cnt++;
8484 }
8485 
8486 /**
8487  * s2io_io_error_detected - called when PCI error is detected
8488  * @pdev: Pointer to PCI device
8489  * @state: The current pci connection state
8490  *
8491  * This function is called after a PCI bus error affecting
8492  * this device has been detected.
8493  */
8494 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8495 					       pci_channel_state_t state)
8496 {
8497 	struct net_device *netdev = pci_get_drvdata(pdev);
8498 	struct s2io_nic *sp = netdev_priv(netdev);
8499 
8500 	netif_device_detach(netdev);
8501 
8502 	if (state == pci_channel_io_perm_failure)
8503 		return PCI_ERS_RESULT_DISCONNECT;
8504 
8505 	if (netif_running(netdev)) {
8506 		/* Bring down the card, while avoiding PCI I/O */
8507 		do_s2io_card_down(sp, 0);
8508 	}
8509 	pci_disable_device(pdev);
8510 
8511 	return PCI_ERS_RESULT_NEED_RESET;
8512 }
8513 
8514 /**
8515  * s2io_io_slot_reset - called after the pci bus has been reset.
8516  * @pdev: Pointer to PCI device
8517  *
8518  * Restart the card from scratch, as if from a cold-boot.
8519  * At this point, the card has exprienced a hard reset,
8520  * followed by fixups by BIOS, and has its config space
8521  * set up identically to what it was at cold boot.
8522  */
8523 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8524 {
8525 	struct net_device *netdev = pci_get_drvdata(pdev);
8526 	struct s2io_nic *sp = netdev_priv(netdev);
8527 
8528 	if (pci_enable_device(pdev)) {
8529 		pr_err("Cannot re-enable PCI device after reset.\n");
8530 		return PCI_ERS_RESULT_DISCONNECT;
8531 	}
8532 
8533 	pci_set_master(pdev);
8534 	s2io_reset(sp);
8535 
8536 	return PCI_ERS_RESULT_RECOVERED;
8537 }
8538 
8539 /**
8540  * s2io_io_resume - called when traffic can start flowing again.
8541  * @pdev: Pointer to PCI device
8542  *
8543  * This callback is called when the error recovery driver tells
8544  * us that its OK to resume normal operation.
8545  */
8546 static void s2io_io_resume(struct pci_dev *pdev)
8547 {
8548 	struct net_device *netdev = pci_get_drvdata(pdev);
8549 	struct s2io_nic *sp = netdev_priv(netdev);
8550 
8551 	if (netif_running(netdev)) {
8552 		if (s2io_card_up(sp)) {
8553 			pr_err("Can't bring device back up after reset.\n");
8554 			return;
8555 		}
8556 
8557 		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8558 			s2io_card_down(sp);
8559 			pr_err("Can't restore mac addr after reset.\n");
8560 			return;
8561 		}
8562 	}
8563 
8564 	netif_device_attach(netdev);
8565 	netif_tx_wake_all_queues(netdev);
8566 }
8567