1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2010 Exar Corp.
4  *
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik		: For pointing out the improper error condition
15  *			  check in the s2io_xmit routine and also some
16  *			  issues in the Tx watch dog function. Also for
17  *			  patiently answering all those innumerable
18  *			  questions regaring the 2.6 porting issues.
19  * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
20  *			  macros available only in 2.6 Kernel.
21  * Francois Romieu	: For pointing out all code part that were
22  *			  deprecated and also styling related comments.
23  * Grant Grundler	: For helping me get rid of some Architecture
24  *			  dependent code.
25  * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *		values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_max_pkts: This parameter defines maximum number of packets can be
42  *     aggregated as a single large packet
43  * napi: This parameter used to enable/disable NAPI (polling Rx)
44  *     Possible values '1' for enable and '0' for disable. Default is '1'
45  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
46  *                 Possible values '1' for enable , '0' for disable.
47  *                 Default is '2' - which means disable in promisc mode
48  *                 and enable in non-promiscuous mode.
49  * multiq: This parameter used to enable/disable MULTIQUEUE support.
50  *      Possible values '1' for enable and '0' for disable. Default is '0'
51  ************************************************************************/
52 
53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
54 
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/mdio.h>
65 #include <linux/skbuff.h>
66 #include <linux/init.h>
67 #include <linux/delay.h>
68 #include <linux/stddef.h>
69 #include <linux/ioctl.h>
70 #include <linux/timex.h>
71 #include <linux/ethtool.h>
72 #include <linux/workqueue.h>
73 #include <linux/if_vlan.h>
74 #include <linux/ip.h>
75 #include <linux/tcp.h>
76 #include <linux/uaccess.h>
77 #include <linux/io.h>
78 #include <linux/io-64-nonatomic-lo-hi.h>
79 #include <linux/slab.h>
80 #include <linux/prefetch.h>
81 #include <net/tcp.h>
82 #include <net/checksum.h>
83 
84 #include <asm/div64.h>
85 #include <asm/irq.h>
86 
87 /* local include */
88 #include "s2io.h"
89 #include "s2io-regs.h"
90 
91 #define DRV_VERSION "2.0.26.28"
92 
93 /* S2io Driver name & version. */
94 static const char s2io_driver_name[] = "Neterion";
95 static const char s2io_driver_version[] = DRV_VERSION;
96 
97 static const int rxd_size[2] = {32, 48};
98 static const int rxd_count[2] = {127, 85};
99 
100 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
101 {
102 	int ret;
103 
104 	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
105 	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
106 
107 	return ret;
108 }
109 
110 /*
111  * Cards with following subsystem_id have a link state indication
112  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
113  * macro below identifies these cards given the subsystem_id.
114  */
115 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
116 	(dev_type == XFRAME_I_DEVICE) ?					\
117 	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
118 	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
119 
120 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
121 				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
122 
123 static inline int is_s2io_card_up(const struct s2io_nic *sp)
124 {
125 	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
126 }
127 
128 /* Ethtool related variables and Macros. */
129 static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
130 	"Register test\t(offline)",
131 	"Eeprom test\t(offline)",
132 	"Link test\t(online)",
133 	"RLDRAM test\t(offline)",
134 	"BIST Test\t(offline)"
135 };
136 
137 static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
138 	{"tmac_frms"},
139 	{"tmac_data_octets"},
140 	{"tmac_drop_frms"},
141 	{"tmac_mcst_frms"},
142 	{"tmac_bcst_frms"},
143 	{"tmac_pause_ctrl_frms"},
144 	{"tmac_ttl_octets"},
145 	{"tmac_ucst_frms"},
146 	{"tmac_nucst_frms"},
147 	{"tmac_any_err_frms"},
148 	{"tmac_ttl_less_fb_octets"},
149 	{"tmac_vld_ip_octets"},
150 	{"tmac_vld_ip"},
151 	{"tmac_drop_ip"},
152 	{"tmac_icmp"},
153 	{"tmac_rst_tcp"},
154 	{"tmac_tcp"},
155 	{"tmac_udp"},
156 	{"rmac_vld_frms"},
157 	{"rmac_data_octets"},
158 	{"rmac_fcs_err_frms"},
159 	{"rmac_drop_frms"},
160 	{"rmac_vld_mcst_frms"},
161 	{"rmac_vld_bcst_frms"},
162 	{"rmac_in_rng_len_err_frms"},
163 	{"rmac_out_rng_len_err_frms"},
164 	{"rmac_long_frms"},
165 	{"rmac_pause_ctrl_frms"},
166 	{"rmac_unsup_ctrl_frms"},
167 	{"rmac_ttl_octets"},
168 	{"rmac_accepted_ucst_frms"},
169 	{"rmac_accepted_nucst_frms"},
170 	{"rmac_discarded_frms"},
171 	{"rmac_drop_events"},
172 	{"rmac_ttl_less_fb_octets"},
173 	{"rmac_ttl_frms"},
174 	{"rmac_usized_frms"},
175 	{"rmac_osized_frms"},
176 	{"rmac_frag_frms"},
177 	{"rmac_jabber_frms"},
178 	{"rmac_ttl_64_frms"},
179 	{"rmac_ttl_65_127_frms"},
180 	{"rmac_ttl_128_255_frms"},
181 	{"rmac_ttl_256_511_frms"},
182 	{"rmac_ttl_512_1023_frms"},
183 	{"rmac_ttl_1024_1518_frms"},
184 	{"rmac_ip"},
185 	{"rmac_ip_octets"},
186 	{"rmac_hdr_err_ip"},
187 	{"rmac_drop_ip"},
188 	{"rmac_icmp"},
189 	{"rmac_tcp"},
190 	{"rmac_udp"},
191 	{"rmac_err_drp_udp"},
192 	{"rmac_xgmii_err_sym"},
193 	{"rmac_frms_q0"},
194 	{"rmac_frms_q1"},
195 	{"rmac_frms_q2"},
196 	{"rmac_frms_q3"},
197 	{"rmac_frms_q4"},
198 	{"rmac_frms_q5"},
199 	{"rmac_frms_q6"},
200 	{"rmac_frms_q7"},
201 	{"rmac_full_q0"},
202 	{"rmac_full_q1"},
203 	{"rmac_full_q2"},
204 	{"rmac_full_q3"},
205 	{"rmac_full_q4"},
206 	{"rmac_full_q5"},
207 	{"rmac_full_q6"},
208 	{"rmac_full_q7"},
209 	{"rmac_pause_cnt"},
210 	{"rmac_xgmii_data_err_cnt"},
211 	{"rmac_xgmii_ctrl_err_cnt"},
212 	{"rmac_accepted_ip"},
213 	{"rmac_err_tcp"},
214 	{"rd_req_cnt"},
215 	{"new_rd_req_cnt"},
216 	{"new_rd_req_rtry_cnt"},
217 	{"rd_rtry_cnt"},
218 	{"wr_rtry_rd_ack_cnt"},
219 	{"wr_req_cnt"},
220 	{"new_wr_req_cnt"},
221 	{"new_wr_req_rtry_cnt"},
222 	{"wr_rtry_cnt"},
223 	{"wr_disc_cnt"},
224 	{"rd_rtry_wr_ack_cnt"},
225 	{"txp_wr_cnt"},
226 	{"txd_rd_cnt"},
227 	{"txd_wr_cnt"},
228 	{"rxd_rd_cnt"},
229 	{"rxd_wr_cnt"},
230 	{"txf_rd_cnt"},
231 	{"rxf_wr_cnt"}
232 };
233 
234 static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
235 	{"rmac_ttl_1519_4095_frms"},
236 	{"rmac_ttl_4096_8191_frms"},
237 	{"rmac_ttl_8192_max_frms"},
238 	{"rmac_ttl_gt_max_frms"},
239 	{"rmac_osized_alt_frms"},
240 	{"rmac_jabber_alt_frms"},
241 	{"rmac_gt_max_alt_frms"},
242 	{"rmac_vlan_frms"},
243 	{"rmac_len_discard"},
244 	{"rmac_fcs_discard"},
245 	{"rmac_pf_discard"},
246 	{"rmac_da_discard"},
247 	{"rmac_red_discard"},
248 	{"rmac_rts_discard"},
249 	{"rmac_ingm_full_discard"},
250 	{"link_fault_cnt"}
251 };
252 
253 static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
254 	{"\n DRIVER STATISTICS"},
255 	{"single_bit_ecc_errs"},
256 	{"double_bit_ecc_errs"},
257 	{"parity_err_cnt"},
258 	{"serious_err_cnt"},
259 	{"soft_reset_cnt"},
260 	{"fifo_full_cnt"},
261 	{"ring_0_full_cnt"},
262 	{"ring_1_full_cnt"},
263 	{"ring_2_full_cnt"},
264 	{"ring_3_full_cnt"},
265 	{"ring_4_full_cnt"},
266 	{"ring_5_full_cnt"},
267 	{"ring_6_full_cnt"},
268 	{"ring_7_full_cnt"},
269 	{"alarm_transceiver_temp_high"},
270 	{"alarm_transceiver_temp_low"},
271 	{"alarm_laser_bias_current_high"},
272 	{"alarm_laser_bias_current_low"},
273 	{"alarm_laser_output_power_high"},
274 	{"alarm_laser_output_power_low"},
275 	{"warn_transceiver_temp_high"},
276 	{"warn_transceiver_temp_low"},
277 	{"warn_laser_bias_current_high"},
278 	{"warn_laser_bias_current_low"},
279 	{"warn_laser_output_power_high"},
280 	{"warn_laser_output_power_low"},
281 	{"lro_aggregated_pkts"},
282 	{"lro_flush_both_count"},
283 	{"lro_out_of_sequence_pkts"},
284 	{"lro_flush_due_to_max_pkts"},
285 	{"lro_avg_aggr_pkts"},
286 	{"mem_alloc_fail_cnt"},
287 	{"pci_map_fail_cnt"},
288 	{"watchdog_timer_cnt"},
289 	{"mem_allocated"},
290 	{"mem_freed"},
291 	{"link_up_cnt"},
292 	{"link_down_cnt"},
293 	{"link_up_time"},
294 	{"link_down_time"},
295 	{"tx_tcode_buf_abort_cnt"},
296 	{"tx_tcode_desc_abort_cnt"},
297 	{"tx_tcode_parity_err_cnt"},
298 	{"tx_tcode_link_loss_cnt"},
299 	{"tx_tcode_list_proc_err_cnt"},
300 	{"rx_tcode_parity_err_cnt"},
301 	{"rx_tcode_abort_cnt"},
302 	{"rx_tcode_parity_abort_cnt"},
303 	{"rx_tcode_rda_fail_cnt"},
304 	{"rx_tcode_unkn_prot_cnt"},
305 	{"rx_tcode_fcs_err_cnt"},
306 	{"rx_tcode_buf_size_err_cnt"},
307 	{"rx_tcode_rxd_corrupt_cnt"},
308 	{"rx_tcode_unkn_err_cnt"},
309 	{"tda_err_cnt"},
310 	{"pfc_err_cnt"},
311 	{"pcc_err_cnt"},
312 	{"tti_err_cnt"},
313 	{"tpa_err_cnt"},
314 	{"sm_err_cnt"},
315 	{"lso_err_cnt"},
316 	{"mac_tmac_err_cnt"},
317 	{"mac_rmac_err_cnt"},
318 	{"xgxs_txgxs_err_cnt"},
319 	{"xgxs_rxgxs_err_cnt"},
320 	{"rc_err_cnt"},
321 	{"prc_pcix_err_cnt"},
322 	{"rpa_err_cnt"},
323 	{"rda_err_cnt"},
324 	{"rti_err_cnt"},
325 	{"mc_err_cnt"}
326 };
327 
328 #define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
329 #define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
330 #define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)
331 
332 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
333 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
334 
335 #define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
336 #define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
337 
338 #define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
339 #define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
340 
341 /* copy mac addr to def_mac_addr array */
342 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
343 {
344 	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
345 	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
346 	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
347 	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
348 	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
349 	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
350 }
351 
352 /*
353  * Constants to be programmed into the Xena's registers, to configure
354  * the XAUI.
355  */
356 
357 #define	END_SIGN	0x0
358 static const u64 herc_act_dtx_cfg[] = {
359 	/* Set address */
360 	0x8000051536750000ULL, 0x80000515367500E0ULL,
361 	/* Write data */
362 	0x8000051536750004ULL, 0x80000515367500E4ULL,
363 	/* Set address */
364 	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
365 	/* Write data */
366 	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
367 	/* Set address */
368 	0x801205150D440000ULL, 0x801205150D4400E0ULL,
369 	/* Write data */
370 	0x801205150D440004ULL, 0x801205150D4400E4ULL,
371 	/* Set address */
372 	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
373 	/* Write data */
374 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
375 	/* Done */
376 	END_SIGN
377 };
378 
379 static const u64 xena_dtx_cfg[] = {
380 	/* Set address */
381 	0x8000051500000000ULL, 0x80000515000000E0ULL,
382 	/* Write data */
383 	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
384 	/* Set address */
385 	0x8001051500000000ULL, 0x80010515000000E0ULL,
386 	/* Write data */
387 	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
388 	/* Set address */
389 	0x8002051500000000ULL, 0x80020515000000E0ULL,
390 	/* Write data */
391 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
392 	END_SIGN
393 };
394 
395 /*
396  * Constants for Fixing the MacAddress problem seen mostly on
397  * Alpha machines.
398  */
399 static const u64 fix_mac[] = {
400 	0x0060000000000000ULL, 0x0060600000000000ULL,
401 	0x0040600000000000ULL, 0x0000600000000000ULL,
402 	0x0020600000000000ULL, 0x0060600000000000ULL,
403 	0x0020600000000000ULL, 0x0060600000000000ULL,
404 	0x0020600000000000ULL, 0x0060600000000000ULL,
405 	0x0020600000000000ULL, 0x0060600000000000ULL,
406 	0x0020600000000000ULL, 0x0060600000000000ULL,
407 	0x0020600000000000ULL, 0x0060600000000000ULL,
408 	0x0020600000000000ULL, 0x0060600000000000ULL,
409 	0x0020600000000000ULL, 0x0060600000000000ULL,
410 	0x0020600000000000ULL, 0x0060600000000000ULL,
411 	0x0020600000000000ULL, 0x0060600000000000ULL,
412 	0x0020600000000000ULL, 0x0000600000000000ULL,
413 	0x0040600000000000ULL, 0x0060600000000000ULL,
414 	END_SIGN
415 };
416 
417 MODULE_LICENSE("GPL");
418 MODULE_VERSION(DRV_VERSION);
419 
420 
421 /* Module Loadable parameters. */
422 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
423 S2IO_PARM_INT(rx_ring_num, 1);
424 S2IO_PARM_INT(multiq, 0);
425 S2IO_PARM_INT(rx_ring_mode, 1);
426 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
427 S2IO_PARM_INT(rmac_pause_time, 0x100);
428 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
429 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
430 S2IO_PARM_INT(shared_splits, 0);
431 S2IO_PARM_INT(tmac_util_period, 5);
432 S2IO_PARM_INT(rmac_util_period, 5);
433 S2IO_PARM_INT(l3l4hdr_size, 128);
434 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
435 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
436 /* Frequency of Rx desc syncs expressed as power of 2 */
437 S2IO_PARM_INT(rxsync_frequency, 3);
438 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
439 S2IO_PARM_INT(intr_type, 2);
440 /* Large receive offload feature */
441 
442 /* Max pkts to be aggregated by LRO at one time. If not specified,
443  * aggregation happens until we hit max IP pkt size(64K)
444  */
445 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
446 S2IO_PARM_INT(indicate_max_pkts, 0);
447 
448 S2IO_PARM_INT(napi, 1);
449 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
450 
451 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
452 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
453 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
454 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
455 static unsigned int rts_frm_len[MAX_RX_RINGS] =
456 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
457 
458 module_param_array(tx_fifo_len, uint, NULL, 0);
459 module_param_array(rx_ring_sz, uint, NULL, 0);
460 module_param_array(rts_frm_len, uint, NULL, 0);
461 
462 /*
463  * S2IO device table.
464  * This table lists all the devices that this driver supports.
465  */
466 static const struct pci_device_id s2io_tbl[] = {
467 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
468 	 PCI_ANY_ID, PCI_ANY_ID},
469 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
470 	 PCI_ANY_ID, PCI_ANY_ID},
471 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
472 	 PCI_ANY_ID, PCI_ANY_ID},
473 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
474 	 PCI_ANY_ID, PCI_ANY_ID},
475 	{0,}
476 };
477 
478 MODULE_DEVICE_TABLE(pci, s2io_tbl);
479 
480 static const struct pci_error_handlers s2io_err_handler = {
481 	.error_detected = s2io_io_error_detected,
482 	.slot_reset = s2io_io_slot_reset,
483 	.resume = s2io_io_resume,
484 };
485 
486 static struct pci_driver s2io_driver = {
487 	.name = "S2IO",
488 	.id_table = s2io_tbl,
489 	.probe = s2io_init_nic,
490 	.remove = s2io_rem_nic,
491 	.err_handler = &s2io_err_handler,
492 };
493 
494 /* A simplifier macro used both by init and free shared_mem Fns(). */
495 #define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
496 
497 /* netqueue manipulation helper functions */
498 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
499 {
500 	if (!sp->config.multiq) {
501 		int i;
502 
503 		for (i = 0; i < sp->config.tx_fifo_num; i++)
504 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
505 	}
506 	netif_tx_stop_all_queues(sp->dev);
507 }
508 
509 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
510 {
511 	if (!sp->config.multiq)
512 		sp->mac_control.fifos[fifo_no].queue_state =
513 			FIFO_QUEUE_STOP;
514 
515 	netif_tx_stop_all_queues(sp->dev);
516 }
517 
518 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
519 {
520 	if (!sp->config.multiq) {
521 		int i;
522 
523 		for (i = 0; i < sp->config.tx_fifo_num; i++)
524 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
525 	}
526 	netif_tx_start_all_queues(sp->dev);
527 }
528 
529 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
530 {
531 	if (!sp->config.multiq) {
532 		int i;
533 
534 		for (i = 0; i < sp->config.tx_fifo_num; i++)
535 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
536 	}
537 	netif_tx_wake_all_queues(sp->dev);
538 }
539 
540 static inline void s2io_wake_tx_queue(
541 	struct fifo_info *fifo, int cnt, u8 multiq)
542 {
543 
544 	if (multiq) {
545 		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
546 			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
547 	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
548 		if (netif_queue_stopped(fifo->dev)) {
549 			fifo->queue_state = FIFO_QUEUE_START;
550 			netif_wake_queue(fifo->dev);
551 		}
552 	}
553 }
554 
555 /**
556  * init_shared_mem - Allocation and Initialization of Memory
557  * @nic: Device private variable.
558  * Description: The function allocates all the memory areas shared
559  * between the NIC and the driver. This includes Tx descriptors,
560  * Rx descriptors and the statistics block.
561  */
562 
563 static int init_shared_mem(struct s2io_nic *nic)
564 {
565 	u32 size;
566 	void *tmp_v_addr, *tmp_v_addr_next;
567 	dma_addr_t tmp_p_addr, tmp_p_addr_next;
568 	struct RxD_block *pre_rxd_blk = NULL;
569 	int i, j, blk_cnt;
570 	int lst_size, lst_per_page;
571 	struct net_device *dev = nic->dev;
572 	unsigned long tmp;
573 	struct buffAdd *ba;
574 	struct config_param *config = &nic->config;
575 	struct mac_info *mac_control = &nic->mac_control;
576 	unsigned long long mem_allocated = 0;
577 
578 	/* Allocation and initialization of TXDLs in FIFOs */
579 	size = 0;
580 	for (i = 0; i < config->tx_fifo_num; i++) {
581 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
582 
583 		size += tx_cfg->fifo_len;
584 	}
585 	if (size > MAX_AVAILABLE_TXDS) {
586 		DBG_PRINT(ERR_DBG,
587 			  "Too many TxDs requested: %d, max supported: %d\n",
588 			  size, MAX_AVAILABLE_TXDS);
589 		return -EINVAL;
590 	}
591 
592 	size = 0;
593 	for (i = 0; i < config->tx_fifo_num; i++) {
594 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
595 
596 		size = tx_cfg->fifo_len;
597 		/*
598 		 * Legal values are from 2 to 8192
599 		 */
600 		if (size < 2) {
601 			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
602 				  "Valid lengths are 2 through 8192\n",
603 				  i, size);
604 			return -EINVAL;
605 		}
606 	}
607 
608 	lst_size = (sizeof(struct TxD) * config->max_txds);
609 	lst_per_page = PAGE_SIZE / lst_size;
610 
611 	for (i = 0; i < config->tx_fifo_num; i++) {
612 		struct fifo_info *fifo = &mac_control->fifos[i];
613 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
614 		int fifo_len = tx_cfg->fifo_len;
615 		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
616 
617 		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
618 		if (!fifo->list_info) {
619 			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
620 			return -ENOMEM;
621 		}
622 		mem_allocated += list_holder_size;
623 	}
624 	for (i = 0; i < config->tx_fifo_num; i++) {
625 		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
626 						lst_per_page);
627 		struct fifo_info *fifo = &mac_control->fifos[i];
628 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
629 
630 		fifo->tx_curr_put_info.offset = 0;
631 		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
632 		fifo->tx_curr_get_info.offset = 0;
633 		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
634 		fifo->fifo_no = i;
635 		fifo->nic = nic;
636 		fifo->max_txds = MAX_SKB_FRAGS + 2;
637 		fifo->dev = dev;
638 
639 		for (j = 0; j < page_num; j++) {
640 			int k = 0;
641 			dma_addr_t tmp_p;
642 			void *tmp_v;
643 			tmp_v = pci_alloc_consistent(nic->pdev,
644 						     PAGE_SIZE, &tmp_p);
645 			if (!tmp_v) {
646 				DBG_PRINT(INFO_DBG,
647 					  "pci_alloc_consistent failed for TxDL\n");
648 				return -ENOMEM;
649 			}
650 			/* If we got a zero DMA address(can happen on
651 			 * certain platforms like PPC), reallocate.
652 			 * Store virtual address of page we don't want,
653 			 * to be freed later.
654 			 */
655 			if (!tmp_p) {
656 				mac_control->zerodma_virt_addr = tmp_v;
657 				DBG_PRINT(INIT_DBG,
658 					  "%s: Zero DMA address for TxDL. "
659 					  "Virtual address %p\n",
660 					  dev->name, tmp_v);
661 				tmp_v = pci_alloc_consistent(nic->pdev,
662 							     PAGE_SIZE, &tmp_p);
663 				if (!tmp_v) {
664 					DBG_PRINT(INFO_DBG,
665 						  "pci_alloc_consistent failed for TxDL\n");
666 					return -ENOMEM;
667 				}
668 				mem_allocated += PAGE_SIZE;
669 			}
670 			while (k < lst_per_page) {
671 				int l = (j * lst_per_page) + k;
672 				if (l == tx_cfg->fifo_len)
673 					break;
674 				fifo->list_info[l].list_virt_addr =
675 					tmp_v + (k * lst_size);
676 				fifo->list_info[l].list_phy_addr =
677 					tmp_p + (k * lst_size);
678 				k++;
679 			}
680 		}
681 	}
682 
683 	for (i = 0; i < config->tx_fifo_num; i++) {
684 		struct fifo_info *fifo = &mac_control->fifos[i];
685 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
686 
687 		size = tx_cfg->fifo_len;
688 		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
689 		if (!fifo->ufo_in_band_v)
690 			return -ENOMEM;
691 		mem_allocated += (size * sizeof(u64));
692 	}
693 
694 	/* Allocation and initialization of RXDs in Rings */
695 	size = 0;
696 	for (i = 0; i < config->rx_ring_num; i++) {
697 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
698 		struct ring_info *ring = &mac_control->rings[i];
699 
700 		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
701 			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
702 				  "multiple of RxDs per Block\n",
703 				  dev->name, i);
704 			return FAILURE;
705 		}
706 		size += rx_cfg->num_rxd;
707 		ring->block_count = rx_cfg->num_rxd /
708 			(rxd_count[nic->rxd_mode] + 1);
709 		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
710 	}
711 	if (nic->rxd_mode == RXD_MODE_1)
712 		size = (size * (sizeof(struct RxD1)));
713 	else
714 		size = (size * (sizeof(struct RxD3)));
715 
716 	for (i = 0; i < config->rx_ring_num; i++) {
717 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
718 		struct ring_info *ring = &mac_control->rings[i];
719 
720 		ring->rx_curr_get_info.block_index = 0;
721 		ring->rx_curr_get_info.offset = 0;
722 		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
723 		ring->rx_curr_put_info.block_index = 0;
724 		ring->rx_curr_put_info.offset = 0;
725 		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
726 		ring->nic = nic;
727 		ring->ring_no = i;
728 
729 		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
730 		/*  Allocating all the Rx blocks */
731 		for (j = 0; j < blk_cnt; j++) {
732 			struct rx_block_info *rx_blocks;
733 			int l;
734 
735 			rx_blocks = &ring->rx_blocks[j];
736 			size = SIZE_OF_BLOCK;	/* size is always page size */
737 			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
738 							  &tmp_p_addr);
739 			if (tmp_v_addr == NULL) {
740 				/*
741 				 * In case of failure, free_shared_mem()
742 				 * is called, which should free any
743 				 * memory that was alloced till the
744 				 * failure happened.
745 				 */
746 				rx_blocks->block_virt_addr = tmp_v_addr;
747 				return -ENOMEM;
748 			}
749 			mem_allocated += size;
750 
751 			size = sizeof(struct rxd_info) *
752 				rxd_count[nic->rxd_mode];
753 			rx_blocks->block_virt_addr = tmp_v_addr;
754 			rx_blocks->block_dma_addr = tmp_p_addr;
755 			rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
756 			if (!rx_blocks->rxds)
757 				return -ENOMEM;
758 			mem_allocated += size;
759 			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
760 				rx_blocks->rxds[l].virt_addr =
761 					rx_blocks->block_virt_addr +
762 					(rxd_size[nic->rxd_mode] * l);
763 				rx_blocks->rxds[l].dma_addr =
764 					rx_blocks->block_dma_addr +
765 					(rxd_size[nic->rxd_mode] * l);
766 			}
767 		}
768 		/* Interlinking all Rx Blocks */
769 		for (j = 0; j < blk_cnt; j++) {
770 			int next = (j + 1) % blk_cnt;
771 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
772 			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
773 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
774 			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
775 
776 			pre_rxd_blk = tmp_v_addr;
777 			pre_rxd_blk->reserved_2_pNext_RxD_block =
778 				(unsigned long)tmp_v_addr_next;
779 			pre_rxd_blk->pNext_RxD_Blk_physical =
780 				(u64)tmp_p_addr_next;
781 		}
782 	}
783 	if (nic->rxd_mode == RXD_MODE_3B) {
784 		/*
785 		 * Allocation of Storages for buffer addresses in 2BUFF mode
786 		 * and the buffers as well.
787 		 */
788 		for (i = 0; i < config->rx_ring_num; i++) {
789 			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
790 			struct ring_info *ring = &mac_control->rings[i];
791 
792 			blk_cnt = rx_cfg->num_rxd /
793 				(rxd_count[nic->rxd_mode] + 1);
794 			size = sizeof(struct buffAdd *) * blk_cnt;
795 			ring->ba = kmalloc(size, GFP_KERNEL);
796 			if (!ring->ba)
797 				return -ENOMEM;
798 			mem_allocated += size;
799 			for (j = 0; j < blk_cnt; j++) {
800 				int k = 0;
801 
802 				size = sizeof(struct buffAdd) *
803 					(rxd_count[nic->rxd_mode] + 1);
804 				ring->ba[j] = kmalloc(size, GFP_KERNEL);
805 				if (!ring->ba[j])
806 					return -ENOMEM;
807 				mem_allocated += size;
808 				while (k != rxd_count[nic->rxd_mode]) {
809 					ba = &ring->ba[j][k];
810 					size = BUF0_LEN + ALIGN_SIZE;
811 					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
812 					if (!ba->ba_0_org)
813 						return -ENOMEM;
814 					mem_allocated += size;
815 					tmp = (unsigned long)ba->ba_0_org;
816 					tmp += ALIGN_SIZE;
817 					tmp &= ~((unsigned long)ALIGN_SIZE);
818 					ba->ba_0 = (void *)tmp;
819 
820 					size = BUF1_LEN + ALIGN_SIZE;
821 					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
822 					if (!ba->ba_1_org)
823 						return -ENOMEM;
824 					mem_allocated += size;
825 					tmp = (unsigned long)ba->ba_1_org;
826 					tmp += ALIGN_SIZE;
827 					tmp &= ~((unsigned long)ALIGN_SIZE);
828 					ba->ba_1 = (void *)tmp;
829 					k++;
830 				}
831 			}
832 		}
833 	}
834 
835 	/* Allocation and initialization of Statistics block */
836 	size = sizeof(struct stat_block);
837 	mac_control->stats_mem =
838 		pci_alloc_consistent(nic->pdev, size,
839 				     &mac_control->stats_mem_phy);
840 
841 	if (!mac_control->stats_mem) {
842 		/*
843 		 * In case of failure, free_shared_mem() is called, which
844 		 * should free any memory that was alloced till the
845 		 * failure happened.
846 		 */
847 		return -ENOMEM;
848 	}
849 	mem_allocated += size;
850 	mac_control->stats_mem_sz = size;
851 
852 	tmp_v_addr = mac_control->stats_mem;
853 	mac_control->stats_info = tmp_v_addr;
854 	memset(tmp_v_addr, 0, size);
855 	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
856 		dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
857 	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
858 	return SUCCESS;
859 }
860 
861 /**
862  * free_shared_mem - Free the allocated Memory
863  * @nic:  Device private variable.
864  * Description: This function is to free all memory locations allocated by
865  * the init_shared_mem() function and return it to the kernel.
866  */
867 
868 static void free_shared_mem(struct s2io_nic *nic)
869 {
870 	int i, j, blk_cnt, size;
871 	void *tmp_v_addr;
872 	dma_addr_t tmp_p_addr;
873 	int lst_size, lst_per_page;
874 	struct net_device *dev;
875 	int page_num = 0;
876 	struct config_param *config;
877 	struct mac_info *mac_control;
878 	struct stat_block *stats;
879 	struct swStat *swstats;
880 
881 	if (!nic)
882 		return;
883 
884 	dev = nic->dev;
885 
886 	config = &nic->config;
887 	mac_control = &nic->mac_control;
888 	stats = mac_control->stats_info;
889 	swstats = &stats->sw_stat;
890 
891 	lst_size = sizeof(struct TxD) * config->max_txds;
892 	lst_per_page = PAGE_SIZE / lst_size;
893 
894 	for (i = 0; i < config->tx_fifo_num; i++) {
895 		struct fifo_info *fifo = &mac_control->fifos[i];
896 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
897 
898 		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
899 		for (j = 0; j < page_num; j++) {
900 			int mem_blks = (j * lst_per_page);
901 			struct list_info_hold *fli;
902 
903 			if (!fifo->list_info)
904 				return;
905 
906 			fli = &fifo->list_info[mem_blks];
907 			if (!fli->list_virt_addr)
908 				break;
909 			pci_free_consistent(nic->pdev, PAGE_SIZE,
910 					    fli->list_virt_addr,
911 					    fli->list_phy_addr);
912 			swstats->mem_freed += PAGE_SIZE;
913 		}
914 		/* If we got a zero DMA address during allocation,
915 		 * free the page now
916 		 */
917 		if (mac_control->zerodma_virt_addr) {
918 			pci_free_consistent(nic->pdev, PAGE_SIZE,
919 					    mac_control->zerodma_virt_addr,
920 					    (dma_addr_t)0);
921 			DBG_PRINT(INIT_DBG,
922 				  "%s: Freeing TxDL with zero DMA address. "
923 				  "Virtual address %p\n",
924 				  dev->name, mac_control->zerodma_virt_addr);
925 			swstats->mem_freed += PAGE_SIZE;
926 		}
927 		kfree(fifo->list_info);
928 		swstats->mem_freed += tx_cfg->fifo_len *
929 			sizeof(struct list_info_hold);
930 	}
931 
932 	size = SIZE_OF_BLOCK;
933 	for (i = 0; i < config->rx_ring_num; i++) {
934 		struct ring_info *ring = &mac_control->rings[i];
935 
936 		blk_cnt = ring->block_count;
937 		for (j = 0; j < blk_cnt; j++) {
938 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
939 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
940 			if (tmp_v_addr == NULL)
941 				break;
942 			pci_free_consistent(nic->pdev, size,
943 					    tmp_v_addr, tmp_p_addr);
944 			swstats->mem_freed += size;
945 			kfree(ring->rx_blocks[j].rxds);
946 			swstats->mem_freed += sizeof(struct rxd_info) *
947 				rxd_count[nic->rxd_mode];
948 		}
949 	}
950 
951 	if (nic->rxd_mode == RXD_MODE_3B) {
952 		/* Freeing buffer storage addresses in 2BUFF mode. */
953 		for (i = 0; i < config->rx_ring_num; i++) {
954 			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
955 			struct ring_info *ring = &mac_control->rings[i];
956 
957 			blk_cnt = rx_cfg->num_rxd /
958 				(rxd_count[nic->rxd_mode] + 1);
959 			for (j = 0; j < blk_cnt; j++) {
960 				int k = 0;
961 				if (!ring->ba[j])
962 					continue;
963 				while (k != rxd_count[nic->rxd_mode]) {
964 					struct buffAdd *ba = &ring->ba[j][k];
965 					kfree(ba->ba_0_org);
966 					swstats->mem_freed +=
967 						BUF0_LEN + ALIGN_SIZE;
968 					kfree(ba->ba_1_org);
969 					swstats->mem_freed +=
970 						BUF1_LEN + ALIGN_SIZE;
971 					k++;
972 				}
973 				kfree(ring->ba[j]);
974 				swstats->mem_freed += sizeof(struct buffAdd) *
975 					(rxd_count[nic->rxd_mode] + 1);
976 			}
977 			kfree(ring->ba);
978 			swstats->mem_freed += sizeof(struct buffAdd *) *
979 				blk_cnt;
980 		}
981 	}
982 
983 	for (i = 0; i < nic->config.tx_fifo_num; i++) {
984 		struct fifo_info *fifo = &mac_control->fifos[i];
985 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
986 
987 		if (fifo->ufo_in_band_v) {
988 			swstats->mem_freed += tx_cfg->fifo_len *
989 				sizeof(u64);
990 			kfree(fifo->ufo_in_band_v);
991 		}
992 	}
993 
994 	if (mac_control->stats_mem) {
995 		swstats->mem_freed += mac_control->stats_mem_sz;
996 		pci_free_consistent(nic->pdev,
997 				    mac_control->stats_mem_sz,
998 				    mac_control->stats_mem,
999 				    mac_control->stats_mem_phy);
1000 	}
1001 }
1002 
1003 /**
1004  * s2io_verify_pci_mode -
1005  */
1006 
1007 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1008 {
1009 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1010 	register u64 val64 = 0;
1011 	int     mode;
1012 
1013 	val64 = readq(&bar0->pci_mode);
1014 	mode = (u8)GET_PCI_MODE(val64);
1015 
1016 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1017 		return -1;      /* Unknown PCI mode */
1018 	return mode;
1019 }
1020 
1021 #define NEC_VENID   0x1033
1022 #define NEC_DEVID   0x0125
1023 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1024 {
1025 	struct pci_dev *tdev = NULL;
1026 	for_each_pci_dev(tdev) {
1027 		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1028 			if (tdev->bus == s2io_pdev->bus->parent) {
1029 				pci_dev_put(tdev);
1030 				return 1;
1031 			}
1032 		}
1033 	}
1034 	return 0;
1035 }
1036 
1037 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1038 /**
1039  * s2io_print_pci_mode -
1040  */
1041 static int s2io_print_pci_mode(struct s2io_nic *nic)
1042 {
1043 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1044 	register u64 val64 = 0;
1045 	int	mode;
1046 	struct config_param *config = &nic->config;
1047 	const char *pcimode;
1048 
1049 	val64 = readq(&bar0->pci_mode);
1050 	mode = (u8)GET_PCI_MODE(val64);
1051 
1052 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1053 		return -1;	/* Unknown PCI mode */
1054 
1055 	config->bus_speed = bus_speed[mode];
1056 
1057 	if (s2io_on_nec_bridge(nic->pdev)) {
1058 		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1059 			  nic->dev->name);
1060 		return mode;
1061 	}
1062 
1063 	switch (mode) {
1064 	case PCI_MODE_PCI_33:
1065 		pcimode = "33MHz PCI bus";
1066 		break;
1067 	case PCI_MODE_PCI_66:
1068 		pcimode = "66MHz PCI bus";
1069 		break;
1070 	case PCI_MODE_PCIX_M1_66:
1071 		pcimode = "66MHz PCIX(M1) bus";
1072 		break;
1073 	case PCI_MODE_PCIX_M1_100:
1074 		pcimode = "100MHz PCIX(M1) bus";
1075 		break;
1076 	case PCI_MODE_PCIX_M1_133:
1077 		pcimode = "133MHz PCIX(M1) bus";
1078 		break;
1079 	case PCI_MODE_PCIX_M2_66:
1080 		pcimode = "133MHz PCIX(M2) bus";
1081 		break;
1082 	case PCI_MODE_PCIX_M2_100:
1083 		pcimode = "200MHz PCIX(M2) bus";
1084 		break;
1085 	case PCI_MODE_PCIX_M2_133:
1086 		pcimode = "266MHz PCIX(M2) bus";
1087 		break;
1088 	default:
1089 		pcimode = "unsupported bus!";
1090 		mode = -1;
1091 	}
1092 
1093 	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1094 		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1095 
1096 	return mode;
1097 }
1098 
1099 /**
1100  *  init_tti - Initialization transmit traffic interrupt scheme
1101  *  @nic: device private variable
1102  *  @link: link status (UP/DOWN) used to enable/disable continuous
1103  *  transmit interrupts
1104  *  Description: The function configures transmit traffic interrupts
1105  *  Return Value:  SUCCESS on success and
1106  *  '-1' on failure
1107  */
1108 
1109 static int init_tti(struct s2io_nic *nic, int link)
1110 {
1111 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1112 	register u64 val64 = 0;
1113 	int i;
1114 	struct config_param *config = &nic->config;
1115 
1116 	for (i = 0; i < config->tx_fifo_num; i++) {
1117 		/*
1118 		 * TTI Initialization. Default Tx timer gets us about
1119 		 * 250 interrupts per sec. Continuous interrupts are enabled
1120 		 * by default.
1121 		 */
1122 		if (nic->device_type == XFRAME_II_DEVICE) {
1123 			int count = (nic->config.bus_speed * 125)/2;
1124 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1125 		} else
1126 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1127 
1128 		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1129 			TTI_DATA1_MEM_TX_URNG_B(0x10) |
1130 			TTI_DATA1_MEM_TX_URNG_C(0x30) |
1131 			TTI_DATA1_MEM_TX_TIMER_AC_EN;
1132 		if (i == 0)
1133 			if (use_continuous_tx_intrs && (link == LINK_UP))
1134 				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1135 		writeq(val64, &bar0->tti_data1_mem);
1136 
1137 		if (nic->config.intr_type == MSI_X) {
1138 			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1139 				TTI_DATA2_MEM_TX_UFC_B(0x100) |
1140 				TTI_DATA2_MEM_TX_UFC_C(0x200) |
1141 				TTI_DATA2_MEM_TX_UFC_D(0x300);
1142 		} else {
1143 			if ((nic->config.tx_steering_type ==
1144 			     TX_DEFAULT_STEERING) &&
1145 			    (config->tx_fifo_num > 1) &&
1146 			    (i >= nic->udp_fifo_idx) &&
1147 			    (i < (nic->udp_fifo_idx +
1148 				  nic->total_udp_fifos)))
1149 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1150 					TTI_DATA2_MEM_TX_UFC_B(0x80) |
1151 					TTI_DATA2_MEM_TX_UFC_C(0x100) |
1152 					TTI_DATA2_MEM_TX_UFC_D(0x120);
1153 			else
1154 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1155 					TTI_DATA2_MEM_TX_UFC_B(0x20) |
1156 					TTI_DATA2_MEM_TX_UFC_C(0x40) |
1157 					TTI_DATA2_MEM_TX_UFC_D(0x80);
1158 		}
1159 
1160 		writeq(val64, &bar0->tti_data2_mem);
1161 
1162 		val64 = TTI_CMD_MEM_WE |
1163 			TTI_CMD_MEM_STROBE_NEW_CMD |
1164 			TTI_CMD_MEM_OFFSET(i);
1165 		writeq(val64, &bar0->tti_command_mem);
1166 
1167 		if (wait_for_cmd_complete(&bar0->tti_command_mem,
1168 					  TTI_CMD_MEM_STROBE_NEW_CMD,
1169 					  S2IO_BIT_RESET) != SUCCESS)
1170 			return FAILURE;
1171 	}
1172 
1173 	return SUCCESS;
1174 }
1175 
1176 /**
1177  *  init_nic - Initialization of hardware
1178  *  @nic: device private variable
1179  *  Description: The function sequentially configures every block
1180  *  of the H/W from their reset values.
1181  *  Return Value:  SUCCESS on success and
1182  *  '-1' on failure (endian settings incorrect).
1183  */
1184 
1185 static int init_nic(struct s2io_nic *nic)
1186 {
1187 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1188 	struct net_device *dev = nic->dev;
1189 	register u64 val64 = 0;
1190 	void __iomem *add;
1191 	u32 time;
1192 	int i, j;
1193 	int dtx_cnt = 0;
1194 	unsigned long long mem_share;
1195 	int mem_size;
1196 	struct config_param *config = &nic->config;
1197 	struct mac_info *mac_control = &nic->mac_control;
1198 
1199 	/* to set the swapper controle on the card */
1200 	if (s2io_set_swapper(nic)) {
1201 		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1202 		return -EIO;
1203 	}
1204 
1205 	/*
1206 	 * Herc requires EOI to be removed from reset before XGXS, so..
1207 	 */
1208 	if (nic->device_type & XFRAME_II_DEVICE) {
1209 		val64 = 0xA500000000ULL;
1210 		writeq(val64, &bar0->sw_reset);
1211 		msleep(500);
1212 		val64 = readq(&bar0->sw_reset);
1213 	}
1214 
1215 	/* Remove XGXS from reset state */
1216 	val64 = 0;
1217 	writeq(val64, &bar0->sw_reset);
1218 	msleep(500);
1219 	val64 = readq(&bar0->sw_reset);
1220 
1221 	/* Ensure that it's safe to access registers by checking
1222 	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1223 	 */
1224 	if (nic->device_type == XFRAME_II_DEVICE) {
1225 		for (i = 0; i < 50; i++) {
1226 			val64 = readq(&bar0->adapter_status);
1227 			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1228 				break;
1229 			msleep(10);
1230 		}
1231 		if (i == 50)
1232 			return -ENODEV;
1233 	}
1234 
1235 	/*  Enable Receiving broadcasts */
1236 	add = &bar0->mac_cfg;
1237 	val64 = readq(&bar0->mac_cfg);
1238 	val64 |= MAC_RMAC_BCAST_ENABLE;
1239 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1240 	writel((u32)val64, add);
1241 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1242 	writel((u32) (val64 >> 32), (add + 4));
1243 
1244 	/* Read registers in all blocks */
1245 	val64 = readq(&bar0->mac_int_mask);
1246 	val64 = readq(&bar0->mc_int_mask);
1247 	val64 = readq(&bar0->xgxs_int_mask);
1248 
1249 	/*  Set MTU */
1250 	val64 = dev->mtu;
1251 	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1252 
1253 	if (nic->device_type & XFRAME_II_DEVICE) {
1254 		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1255 			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1256 					  &bar0->dtx_control, UF);
1257 			if (dtx_cnt & 0x1)
1258 				msleep(1); /* Necessary!! */
1259 			dtx_cnt++;
1260 		}
1261 	} else {
1262 		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1263 			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1264 					  &bar0->dtx_control, UF);
1265 			val64 = readq(&bar0->dtx_control);
1266 			dtx_cnt++;
1267 		}
1268 	}
1269 
1270 	/*  Tx DMA Initialization */
1271 	val64 = 0;
1272 	writeq(val64, &bar0->tx_fifo_partition_0);
1273 	writeq(val64, &bar0->tx_fifo_partition_1);
1274 	writeq(val64, &bar0->tx_fifo_partition_2);
1275 	writeq(val64, &bar0->tx_fifo_partition_3);
1276 
1277 	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1278 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1279 
1280 		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1281 			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1282 
1283 		if (i == (config->tx_fifo_num - 1)) {
1284 			if (i % 2 == 0)
1285 				i++;
1286 		}
1287 
1288 		switch (i) {
1289 		case 1:
1290 			writeq(val64, &bar0->tx_fifo_partition_0);
1291 			val64 = 0;
1292 			j = 0;
1293 			break;
1294 		case 3:
1295 			writeq(val64, &bar0->tx_fifo_partition_1);
1296 			val64 = 0;
1297 			j = 0;
1298 			break;
1299 		case 5:
1300 			writeq(val64, &bar0->tx_fifo_partition_2);
1301 			val64 = 0;
1302 			j = 0;
1303 			break;
1304 		case 7:
1305 			writeq(val64, &bar0->tx_fifo_partition_3);
1306 			val64 = 0;
1307 			j = 0;
1308 			break;
1309 		default:
1310 			j++;
1311 			break;
1312 		}
1313 	}
1314 
1315 	/*
1316 	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1317 	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1318 	 */
1319 	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1320 		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1321 
1322 	val64 = readq(&bar0->tx_fifo_partition_0);
1323 	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1324 		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1325 
1326 	/*
1327 	 * Initialization of Tx_PA_CONFIG register to ignore packet
1328 	 * integrity checking.
1329 	 */
1330 	val64 = readq(&bar0->tx_pa_cfg);
1331 	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1332 		TX_PA_CFG_IGNORE_SNAP_OUI |
1333 		TX_PA_CFG_IGNORE_LLC_CTRL |
1334 		TX_PA_CFG_IGNORE_L2_ERR;
1335 	writeq(val64, &bar0->tx_pa_cfg);
1336 
1337 	/* Rx DMA initialization. */
1338 	val64 = 0;
1339 	for (i = 0; i < config->rx_ring_num; i++) {
1340 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1341 
1342 		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1343 	}
1344 	writeq(val64, &bar0->rx_queue_priority);
1345 
1346 	/*
1347 	 * Allocating equal share of memory to all the
1348 	 * configured Rings.
1349 	 */
1350 	val64 = 0;
1351 	if (nic->device_type & XFRAME_II_DEVICE)
1352 		mem_size = 32;
1353 	else
1354 		mem_size = 64;
1355 
1356 	for (i = 0; i < config->rx_ring_num; i++) {
1357 		switch (i) {
1358 		case 0:
1359 			mem_share = (mem_size / config->rx_ring_num +
1360 				     mem_size % config->rx_ring_num);
1361 			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1362 			continue;
1363 		case 1:
1364 			mem_share = (mem_size / config->rx_ring_num);
1365 			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1366 			continue;
1367 		case 2:
1368 			mem_share = (mem_size / config->rx_ring_num);
1369 			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1370 			continue;
1371 		case 3:
1372 			mem_share = (mem_size / config->rx_ring_num);
1373 			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1374 			continue;
1375 		case 4:
1376 			mem_share = (mem_size / config->rx_ring_num);
1377 			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1378 			continue;
1379 		case 5:
1380 			mem_share = (mem_size / config->rx_ring_num);
1381 			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1382 			continue;
1383 		case 6:
1384 			mem_share = (mem_size / config->rx_ring_num);
1385 			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1386 			continue;
1387 		case 7:
1388 			mem_share = (mem_size / config->rx_ring_num);
1389 			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1390 			continue;
1391 		}
1392 	}
1393 	writeq(val64, &bar0->rx_queue_cfg);
1394 
1395 	/*
1396 	 * Filling Tx round robin registers
1397 	 * as per the number of FIFOs for equal scheduling priority
1398 	 */
1399 	switch (config->tx_fifo_num) {
1400 	case 1:
1401 		val64 = 0x0;
1402 		writeq(val64, &bar0->tx_w_round_robin_0);
1403 		writeq(val64, &bar0->tx_w_round_robin_1);
1404 		writeq(val64, &bar0->tx_w_round_robin_2);
1405 		writeq(val64, &bar0->tx_w_round_robin_3);
1406 		writeq(val64, &bar0->tx_w_round_robin_4);
1407 		break;
1408 	case 2:
1409 		val64 = 0x0001000100010001ULL;
1410 		writeq(val64, &bar0->tx_w_round_robin_0);
1411 		writeq(val64, &bar0->tx_w_round_robin_1);
1412 		writeq(val64, &bar0->tx_w_round_robin_2);
1413 		writeq(val64, &bar0->tx_w_round_robin_3);
1414 		val64 = 0x0001000100000000ULL;
1415 		writeq(val64, &bar0->tx_w_round_robin_4);
1416 		break;
1417 	case 3:
1418 		val64 = 0x0001020001020001ULL;
1419 		writeq(val64, &bar0->tx_w_round_robin_0);
1420 		val64 = 0x0200010200010200ULL;
1421 		writeq(val64, &bar0->tx_w_round_robin_1);
1422 		val64 = 0x0102000102000102ULL;
1423 		writeq(val64, &bar0->tx_w_round_robin_2);
1424 		val64 = 0x0001020001020001ULL;
1425 		writeq(val64, &bar0->tx_w_round_robin_3);
1426 		val64 = 0x0200010200000000ULL;
1427 		writeq(val64, &bar0->tx_w_round_robin_4);
1428 		break;
1429 	case 4:
1430 		val64 = 0x0001020300010203ULL;
1431 		writeq(val64, &bar0->tx_w_round_robin_0);
1432 		writeq(val64, &bar0->tx_w_round_robin_1);
1433 		writeq(val64, &bar0->tx_w_round_robin_2);
1434 		writeq(val64, &bar0->tx_w_round_robin_3);
1435 		val64 = 0x0001020300000000ULL;
1436 		writeq(val64, &bar0->tx_w_round_robin_4);
1437 		break;
1438 	case 5:
1439 		val64 = 0x0001020304000102ULL;
1440 		writeq(val64, &bar0->tx_w_round_robin_0);
1441 		val64 = 0x0304000102030400ULL;
1442 		writeq(val64, &bar0->tx_w_round_robin_1);
1443 		val64 = 0x0102030400010203ULL;
1444 		writeq(val64, &bar0->tx_w_round_robin_2);
1445 		val64 = 0x0400010203040001ULL;
1446 		writeq(val64, &bar0->tx_w_round_robin_3);
1447 		val64 = 0x0203040000000000ULL;
1448 		writeq(val64, &bar0->tx_w_round_robin_4);
1449 		break;
1450 	case 6:
1451 		val64 = 0x0001020304050001ULL;
1452 		writeq(val64, &bar0->tx_w_round_robin_0);
1453 		val64 = 0x0203040500010203ULL;
1454 		writeq(val64, &bar0->tx_w_round_robin_1);
1455 		val64 = 0x0405000102030405ULL;
1456 		writeq(val64, &bar0->tx_w_round_robin_2);
1457 		val64 = 0x0001020304050001ULL;
1458 		writeq(val64, &bar0->tx_w_round_robin_3);
1459 		val64 = 0x0203040500000000ULL;
1460 		writeq(val64, &bar0->tx_w_round_robin_4);
1461 		break;
1462 	case 7:
1463 		val64 = 0x0001020304050600ULL;
1464 		writeq(val64, &bar0->tx_w_round_robin_0);
1465 		val64 = 0x0102030405060001ULL;
1466 		writeq(val64, &bar0->tx_w_round_robin_1);
1467 		val64 = 0x0203040506000102ULL;
1468 		writeq(val64, &bar0->tx_w_round_robin_2);
1469 		val64 = 0x0304050600010203ULL;
1470 		writeq(val64, &bar0->tx_w_round_robin_3);
1471 		val64 = 0x0405060000000000ULL;
1472 		writeq(val64, &bar0->tx_w_round_robin_4);
1473 		break;
1474 	case 8:
1475 		val64 = 0x0001020304050607ULL;
1476 		writeq(val64, &bar0->tx_w_round_robin_0);
1477 		writeq(val64, &bar0->tx_w_round_robin_1);
1478 		writeq(val64, &bar0->tx_w_round_robin_2);
1479 		writeq(val64, &bar0->tx_w_round_robin_3);
1480 		val64 = 0x0001020300000000ULL;
1481 		writeq(val64, &bar0->tx_w_round_robin_4);
1482 		break;
1483 	}
1484 
1485 	/* Enable all configured Tx FIFO partitions */
1486 	val64 = readq(&bar0->tx_fifo_partition_0);
1487 	val64 |= (TX_FIFO_PARTITION_EN);
1488 	writeq(val64, &bar0->tx_fifo_partition_0);
1489 
1490 	/* Filling the Rx round robin registers as per the
1491 	 * number of Rings and steering based on QoS with
1492 	 * equal priority.
1493 	 */
1494 	switch (config->rx_ring_num) {
1495 	case 1:
1496 		val64 = 0x0;
1497 		writeq(val64, &bar0->rx_w_round_robin_0);
1498 		writeq(val64, &bar0->rx_w_round_robin_1);
1499 		writeq(val64, &bar0->rx_w_round_robin_2);
1500 		writeq(val64, &bar0->rx_w_round_robin_3);
1501 		writeq(val64, &bar0->rx_w_round_robin_4);
1502 
1503 		val64 = 0x8080808080808080ULL;
1504 		writeq(val64, &bar0->rts_qos_steering);
1505 		break;
1506 	case 2:
1507 		val64 = 0x0001000100010001ULL;
1508 		writeq(val64, &bar0->rx_w_round_robin_0);
1509 		writeq(val64, &bar0->rx_w_round_robin_1);
1510 		writeq(val64, &bar0->rx_w_round_robin_2);
1511 		writeq(val64, &bar0->rx_w_round_robin_3);
1512 		val64 = 0x0001000100000000ULL;
1513 		writeq(val64, &bar0->rx_w_round_robin_4);
1514 
1515 		val64 = 0x8080808040404040ULL;
1516 		writeq(val64, &bar0->rts_qos_steering);
1517 		break;
1518 	case 3:
1519 		val64 = 0x0001020001020001ULL;
1520 		writeq(val64, &bar0->rx_w_round_robin_0);
1521 		val64 = 0x0200010200010200ULL;
1522 		writeq(val64, &bar0->rx_w_round_robin_1);
1523 		val64 = 0x0102000102000102ULL;
1524 		writeq(val64, &bar0->rx_w_round_robin_2);
1525 		val64 = 0x0001020001020001ULL;
1526 		writeq(val64, &bar0->rx_w_round_robin_3);
1527 		val64 = 0x0200010200000000ULL;
1528 		writeq(val64, &bar0->rx_w_round_robin_4);
1529 
1530 		val64 = 0x8080804040402020ULL;
1531 		writeq(val64, &bar0->rts_qos_steering);
1532 		break;
1533 	case 4:
1534 		val64 = 0x0001020300010203ULL;
1535 		writeq(val64, &bar0->rx_w_round_robin_0);
1536 		writeq(val64, &bar0->rx_w_round_robin_1);
1537 		writeq(val64, &bar0->rx_w_round_robin_2);
1538 		writeq(val64, &bar0->rx_w_round_robin_3);
1539 		val64 = 0x0001020300000000ULL;
1540 		writeq(val64, &bar0->rx_w_round_robin_4);
1541 
1542 		val64 = 0x8080404020201010ULL;
1543 		writeq(val64, &bar0->rts_qos_steering);
1544 		break;
1545 	case 5:
1546 		val64 = 0x0001020304000102ULL;
1547 		writeq(val64, &bar0->rx_w_round_robin_0);
1548 		val64 = 0x0304000102030400ULL;
1549 		writeq(val64, &bar0->rx_w_round_robin_1);
1550 		val64 = 0x0102030400010203ULL;
1551 		writeq(val64, &bar0->rx_w_round_robin_2);
1552 		val64 = 0x0400010203040001ULL;
1553 		writeq(val64, &bar0->rx_w_round_robin_3);
1554 		val64 = 0x0203040000000000ULL;
1555 		writeq(val64, &bar0->rx_w_round_robin_4);
1556 
1557 		val64 = 0x8080404020201008ULL;
1558 		writeq(val64, &bar0->rts_qos_steering);
1559 		break;
1560 	case 6:
1561 		val64 = 0x0001020304050001ULL;
1562 		writeq(val64, &bar0->rx_w_round_robin_0);
1563 		val64 = 0x0203040500010203ULL;
1564 		writeq(val64, &bar0->rx_w_round_robin_1);
1565 		val64 = 0x0405000102030405ULL;
1566 		writeq(val64, &bar0->rx_w_round_robin_2);
1567 		val64 = 0x0001020304050001ULL;
1568 		writeq(val64, &bar0->rx_w_round_robin_3);
1569 		val64 = 0x0203040500000000ULL;
1570 		writeq(val64, &bar0->rx_w_round_robin_4);
1571 
1572 		val64 = 0x8080404020100804ULL;
1573 		writeq(val64, &bar0->rts_qos_steering);
1574 		break;
1575 	case 7:
1576 		val64 = 0x0001020304050600ULL;
1577 		writeq(val64, &bar0->rx_w_round_robin_0);
1578 		val64 = 0x0102030405060001ULL;
1579 		writeq(val64, &bar0->rx_w_round_robin_1);
1580 		val64 = 0x0203040506000102ULL;
1581 		writeq(val64, &bar0->rx_w_round_robin_2);
1582 		val64 = 0x0304050600010203ULL;
1583 		writeq(val64, &bar0->rx_w_round_robin_3);
1584 		val64 = 0x0405060000000000ULL;
1585 		writeq(val64, &bar0->rx_w_round_robin_4);
1586 
1587 		val64 = 0x8080402010080402ULL;
1588 		writeq(val64, &bar0->rts_qos_steering);
1589 		break;
1590 	case 8:
1591 		val64 = 0x0001020304050607ULL;
1592 		writeq(val64, &bar0->rx_w_round_robin_0);
1593 		writeq(val64, &bar0->rx_w_round_robin_1);
1594 		writeq(val64, &bar0->rx_w_round_robin_2);
1595 		writeq(val64, &bar0->rx_w_round_robin_3);
1596 		val64 = 0x0001020300000000ULL;
1597 		writeq(val64, &bar0->rx_w_round_robin_4);
1598 
1599 		val64 = 0x8040201008040201ULL;
1600 		writeq(val64, &bar0->rts_qos_steering);
1601 		break;
1602 	}
1603 
1604 	/* UDP Fix */
1605 	val64 = 0;
1606 	for (i = 0; i < 8; i++)
1607 		writeq(val64, &bar0->rts_frm_len_n[i]);
1608 
1609 	/* Set the default rts frame length for the rings configured */
1610 	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1611 	for (i = 0 ; i < config->rx_ring_num ; i++)
1612 		writeq(val64, &bar0->rts_frm_len_n[i]);
1613 
1614 	/* Set the frame length for the configured rings
1615 	 * desired by the user
1616 	 */
1617 	for (i = 0; i < config->rx_ring_num; i++) {
1618 		/* If rts_frm_len[i] == 0 then it is assumed that user not
1619 		 * specified frame length steering.
1620 		 * If the user provides the frame length then program
1621 		 * the rts_frm_len register for those values or else
1622 		 * leave it as it is.
1623 		 */
1624 		if (rts_frm_len[i] != 0) {
1625 			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1626 			       &bar0->rts_frm_len_n[i]);
1627 		}
1628 	}
1629 
1630 	/* Disable differentiated services steering logic */
1631 	for (i = 0; i < 64; i++) {
1632 		if (rts_ds_steer(nic, i, 0) == FAILURE) {
1633 			DBG_PRINT(ERR_DBG,
1634 				  "%s: rts_ds_steer failed on codepoint %d\n",
1635 				  dev->name, i);
1636 			return -ENODEV;
1637 		}
1638 	}
1639 
1640 	/* Program statistics memory */
1641 	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1642 
1643 	if (nic->device_type == XFRAME_II_DEVICE) {
1644 		val64 = STAT_BC(0x320);
1645 		writeq(val64, &bar0->stat_byte_cnt);
1646 	}
1647 
1648 	/*
1649 	 * Initializing the sampling rate for the device to calculate the
1650 	 * bandwidth utilization.
1651 	 */
1652 	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1653 		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1654 	writeq(val64, &bar0->mac_link_util);
1655 
1656 	/*
1657 	 * Initializing the Transmit and Receive Traffic Interrupt
1658 	 * Scheme.
1659 	 */
1660 
1661 	/* Initialize TTI */
1662 	if (SUCCESS != init_tti(nic, nic->last_link_state))
1663 		return -ENODEV;
1664 
1665 	/* RTI Initialization */
1666 	if (nic->device_type == XFRAME_II_DEVICE) {
1667 		/*
1668 		 * Programmed to generate Apprx 500 Intrs per
1669 		 * second
1670 		 */
1671 		int count = (nic->config.bus_speed * 125)/4;
1672 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1673 	} else
1674 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1675 	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1676 		RTI_DATA1_MEM_RX_URNG_B(0x10) |
1677 		RTI_DATA1_MEM_RX_URNG_C(0x30) |
1678 		RTI_DATA1_MEM_RX_TIMER_AC_EN;
1679 
1680 	writeq(val64, &bar0->rti_data1_mem);
1681 
1682 	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1683 		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1684 	if (nic->config.intr_type == MSI_X)
1685 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1686 			  RTI_DATA2_MEM_RX_UFC_D(0x40));
1687 	else
1688 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1689 			  RTI_DATA2_MEM_RX_UFC_D(0x80));
1690 	writeq(val64, &bar0->rti_data2_mem);
1691 
1692 	for (i = 0; i < config->rx_ring_num; i++) {
1693 		val64 = RTI_CMD_MEM_WE |
1694 			RTI_CMD_MEM_STROBE_NEW_CMD |
1695 			RTI_CMD_MEM_OFFSET(i);
1696 		writeq(val64, &bar0->rti_command_mem);
1697 
1698 		/*
1699 		 * Once the operation completes, the Strobe bit of the
1700 		 * command register will be reset. We poll for this
1701 		 * particular condition. We wait for a maximum of 500ms
1702 		 * for the operation to complete, if it's not complete
1703 		 * by then we return error.
1704 		 */
1705 		time = 0;
1706 		while (true) {
1707 			val64 = readq(&bar0->rti_command_mem);
1708 			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1709 				break;
1710 
1711 			if (time > 10) {
1712 				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1713 					  dev->name);
1714 				return -ENODEV;
1715 			}
1716 			time++;
1717 			msleep(50);
1718 		}
1719 	}
1720 
1721 	/*
1722 	 * Initializing proper values as Pause threshold into all
1723 	 * the 8 Queues on Rx side.
1724 	 */
1725 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1726 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1727 
1728 	/* Disable RMAC PAD STRIPPING */
1729 	add = &bar0->mac_cfg;
1730 	val64 = readq(&bar0->mac_cfg);
1731 	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1732 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1733 	writel((u32) (val64), add);
1734 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1735 	writel((u32) (val64 >> 32), (add + 4));
1736 	val64 = readq(&bar0->mac_cfg);
1737 
1738 	/* Enable FCS stripping by adapter */
1739 	add = &bar0->mac_cfg;
1740 	val64 = readq(&bar0->mac_cfg);
1741 	val64 |= MAC_CFG_RMAC_STRIP_FCS;
1742 	if (nic->device_type == XFRAME_II_DEVICE)
1743 		writeq(val64, &bar0->mac_cfg);
1744 	else {
1745 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1746 		writel((u32) (val64), add);
1747 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1748 		writel((u32) (val64 >> 32), (add + 4));
1749 	}
1750 
1751 	/*
1752 	 * Set the time value to be inserted in the pause frame
1753 	 * generated by xena.
1754 	 */
1755 	val64 = readq(&bar0->rmac_pause_cfg);
1756 	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1757 	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1758 	writeq(val64, &bar0->rmac_pause_cfg);
1759 
1760 	/*
1761 	 * Set the Threshold Limit for Generating the pause frame
1762 	 * If the amount of data in any Queue exceeds ratio of
1763 	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1764 	 * pause frame is generated
1765 	 */
1766 	val64 = 0;
1767 	for (i = 0; i < 4; i++) {
1768 		val64 |= (((u64)0xFF00 |
1769 			   nic->mac_control.mc_pause_threshold_q0q3)
1770 			  << (i * 2 * 8));
1771 	}
1772 	writeq(val64, &bar0->mc_pause_thresh_q0q3);
1773 
1774 	val64 = 0;
1775 	for (i = 0; i < 4; i++) {
1776 		val64 |= (((u64)0xFF00 |
1777 			   nic->mac_control.mc_pause_threshold_q4q7)
1778 			  << (i * 2 * 8));
1779 	}
1780 	writeq(val64, &bar0->mc_pause_thresh_q4q7);
1781 
1782 	/*
1783 	 * TxDMA will stop Read request if the number of read split has
1784 	 * exceeded the limit pointed by shared_splits
1785 	 */
1786 	val64 = readq(&bar0->pic_control);
1787 	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1788 	writeq(val64, &bar0->pic_control);
1789 
1790 	if (nic->config.bus_speed == 266) {
1791 		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1792 		writeq(0x0, &bar0->read_retry_delay);
1793 		writeq(0x0, &bar0->write_retry_delay);
1794 	}
1795 
1796 	/*
1797 	 * Programming the Herc to split every write transaction
1798 	 * that does not start on an ADB to reduce disconnects.
1799 	 */
1800 	if (nic->device_type == XFRAME_II_DEVICE) {
1801 		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1802 			MISC_LINK_STABILITY_PRD(3);
1803 		writeq(val64, &bar0->misc_control);
1804 		val64 = readq(&bar0->pic_control2);
1805 		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1806 		writeq(val64, &bar0->pic_control2);
1807 	}
1808 	if (strstr(nic->product_name, "CX4")) {
1809 		val64 = TMAC_AVG_IPG(0x17);
1810 		writeq(val64, &bar0->tmac_avg_ipg);
1811 	}
1812 
1813 	return SUCCESS;
1814 }
1815 #define LINK_UP_DOWN_INTERRUPT		1
1816 #define MAC_RMAC_ERR_TIMER		2
1817 
1818 static int s2io_link_fault_indication(struct s2io_nic *nic)
1819 {
1820 	if (nic->device_type == XFRAME_II_DEVICE)
1821 		return LINK_UP_DOWN_INTERRUPT;
1822 	else
1823 		return MAC_RMAC_ERR_TIMER;
1824 }
1825 
1826 /**
1827  *  do_s2io_write_bits -  update alarm bits in alarm register
1828  *  @value: alarm bits
1829  *  @flag: interrupt status
1830  *  @addr: address value
1831  *  Description: update alarm bits in alarm register
1832  *  Return Value:
1833  *  NONE.
1834  */
1835 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1836 {
1837 	u64 temp64;
1838 
1839 	temp64 = readq(addr);
1840 
1841 	if (flag == ENABLE_INTRS)
1842 		temp64 &= ~((u64)value);
1843 	else
1844 		temp64 |= ((u64)value);
1845 	writeq(temp64, addr);
1846 }
1847 
1848 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1849 {
1850 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1851 	register u64 gen_int_mask = 0;
1852 	u64 interruptible;
1853 
1854 	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1855 	if (mask & TX_DMA_INTR) {
1856 		gen_int_mask |= TXDMA_INT_M;
1857 
1858 		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1859 				   TXDMA_PCC_INT | TXDMA_TTI_INT |
1860 				   TXDMA_LSO_INT | TXDMA_TPA_INT |
1861 				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1862 
1863 		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1864 				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1865 				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1866 				   &bar0->pfc_err_mask);
1867 
1868 		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1869 				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1870 				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1871 
1872 		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1873 				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1874 				   PCC_N_SERR | PCC_6_COF_OV_ERR |
1875 				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1876 				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1877 				   PCC_TXB_ECC_SG_ERR,
1878 				   flag, &bar0->pcc_err_mask);
1879 
1880 		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1881 				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1882 
1883 		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1884 				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1885 				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1886 				   flag, &bar0->lso_err_mask);
1887 
1888 		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1889 				   flag, &bar0->tpa_err_mask);
1890 
1891 		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1892 	}
1893 
1894 	if (mask & TX_MAC_INTR) {
1895 		gen_int_mask |= TXMAC_INT_M;
1896 		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1897 				   &bar0->mac_int_mask);
1898 		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1899 				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1900 				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1901 				   flag, &bar0->mac_tmac_err_mask);
1902 	}
1903 
1904 	if (mask & TX_XGXS_INTR) {
1905 		gen_int_mask |= TXXGXS_INT_M;
1906 		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1907 				   &bar0->xgxs_int_mask);
1908 		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1909 				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1910 				   flag, &bar0->xgxs_txgxs_err_mask);
1911 	}
1912 
1913 	if (mask & RX_DMA_INTR) {
1914 		gen_int_mask |= RXDMA_INT_M;
1915 		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1916 				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1917 				   flag, &bar0->rxdma_int_mask);
1918 		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1919 				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1920 				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1921 				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1922 		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1923 				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1924 				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1925 				   &bar0->prc_pcix_err_mask);
1926 		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1927 				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1928 				   &bar0->rpa_err_mask);
1929 		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1930 				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1931 				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1932 				   RDA_FRM_ECC_SG_ERR |
1933 				   RDA_MISC_ERR|RDA_PCIX_ERR,
1934 				   flag, &bar0->rda_err_mask);
1935 		do_s2io_write_bits(RTI_SM_ERR_ALARM |
1936 				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1937 				   flag, &bar0->rti_err_mask);
1938 	}
1939 
1940 	if (mask & RX_MAC_INTR) {
1941 		gen_int_mask |= RXMAC_INT_M;
1942 		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1943 				   &bar0->mac_int_mask);
1944 		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1945 				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1946 				 RMAC_DOUBLE_ECC_ERR);
1947 		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
1948 			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
1949 		do_s2io_write_bits(interruptible,
1950 				   flag, &bar0->mac_rmac_err_mask);
1951 	}
1952 
1953 	if (mask & RX_XGXS_INTR) {
1954 		gen_int_mask |= RXXGXS_INT_M;
1955 		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1956 				   &bar0->xgxs_int_mask);
1957 		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1958 				   &bar0->xgxs_rxgxs_err_mask);
1959 	}
1960 
1961 	if (mask & MC_INTR) {
1962 		gen_int_mask |= MC_INT_M;
1963 		do_s2io_write_bits(MC_INT_MASK_MC_INT,
1964 				   flag, &bar0->mc_int_mask);
1965 		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1966 				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1967 				   &bar0->mc_err_mask);
1968 	}
1969 	nic->general_int_mask = gen_int_mask;
1970 
1971 	/* Remove this line when alarm interrupts are enabled */
1972 	nic->general_int_mask = 0;
1973 }
1974 
1975 /**
1976  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1977  *  @nic: device private variable,
1978  *  @mask: A mask indicating which Intr block must be modified and,
1979  *  @flag: A flag indicating whether to enable or disable the Intrs.
1980  *  Description: This function will either disable or enable the interrupts
1981  *  depending on the flag argument. The mask argument can be used to
1982  *  enable/disable any Intr block.
1983  *  Return Value: NONE.
1984  */
1985 
1986 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1987 {
1988 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1989 	register u64 temp64 = 0, intr_mask = 0;
1990 
1991 	intr_mask = nic->general_int_mask;
1992 
1993 	/*  Top level interrupt classification */
1994 	/*  PIC Interrupts */
1995 	if (mask & TX_PIC_INTR) {
1996 		/*  Enable PIC Intrs in the general intr mask register */
1997 		intr_mask |= TXPIC_INT_M;
1998 		if (flag == ENABLE_INTRS) {
1999 			/*
2000 			 * If Hercules adapter enable GPIO otherwise
2001 			 * disable all PCIX, Flash, MDIO, IIC and GPIO
2002 			 * interrupts for now.
2003 			 * TODO
2004 			 */
2005 			if (s2io_link_fault_indication(nic) ==
2006 			    LINK_UP_DOWN_INTERRUPT) {
2007 				do_s2io_write_bits(PIC_INT_GPIO, flag,
2008 						   &bar0->pic_int_mask);
2009 				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2010 						   &bar0->gpio_int_mask);
2011 			} else
2012 				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2013 		} else if (flag == DISABLE_INTRS) {
2014 			/*
2015 			 * Disable PIC Intrs in the general
2016 			 * intr mask register
2017 			 */
2018 			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2019 		}
2020 	}
2021 
2022 	/*  Tx traffic interrupts */
2023 	if (mask & TX_TRAFFIC_INTR) {
2024 		intr_mask |= TXTRAFFIC_INT_M;
2025 		if (flag == ENABLE_INTRS) {
2026 			/*
2027 			 * Enable all the Tx side interrupts
2028 			 * writing 0 Enables all 64 TX interrupt levels
2029 			 */
2030 			writeq(0x0, &bar0->tx_traffic_mask);
2031 		} else if (flag == DISABLE_INTRS) {
2032 			/*
2033 			 * Disable Tx Traffic Intrs in the general intr mask
2034 			 * register.
2035 			 */
2036 			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2037 		}
2038 	}
2039 
2040 	/*  Rx traffic interrupts */
2041 	if (mask & RX_TRAFFIC_INTR) {
2042 		intr_mask |= RXTRAFFIC_INT_M;
2043 		if (flag == ENABLE_INTRS) {
2044 			/* writing 0 Enables all 8 RX interrupt levels */
2045 			writeq(0x0, &bar0->rx_traffic_mask);
2046 		} else if (flag == DISABLE_INTRS) {
2047 			/*
2048 			 * Disable Rx Traffic Intrs in the general intr mask
2049 			 * register.
2050 			 */
2051 			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2052 		}
2053 	}
2054 
2055 	temp64 = readq(&bar0->general_int_mask);
2056 	if (flag == ENABLE_INTRS)
2057 		temp64 &= ~((u64)intr_mask);
2058 	else
2059 		temp64 = DISABLE_ALL_INTRS;
2060 	writeq(temp64, &bar0->general_int_mask);
2061 
2062 	nic->general_int_mask = readq(&bar0->general_int_mask);
2063 }
2064 
2065 /**
2066  *  verify_pcc_quiescent- Checks for PCC quiescent state
2067  *  Return: 1 If PCC is quiescence
2068  *          0 If PCC is not quiescence
2069  */
2070 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2071 {
2072 	int ret = 0, herc;
2073 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2074 	u64 val64 = readq(&bar0->adapter_status);
2075 
2076 	herc = (sp->device_type == XFRAME_II_DEVICE);
2077 
2078 	if (flag == false) {
2079 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2080 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2081 				ret = 1;
2082 		} else {
2083 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2084 				ret = 1;
2085 		}
2086 	} else {
2087 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2088 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2089 			     ADAPTER_STATUS_RMAC_PCC_IDLE))
2090 				ret = 1;
2091 		} else {
2092 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2093 			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2094 				ret = 1;
2095 		}
2096 	}
2097 
2098 	return ret;
2099 }
2100 /**
2101  *  verify_xena_quiescence - Checks whether the H/W is ready
2102  *  Description: Returns whether the H/W is ready to go or not. Depending
2103  *  on whether adapter enable bit was written or not the comparison
2104  *  differs and the calling function passes the input argument flag to
2105  *  indicate this.
2106  *  Return: 1 If xena is quiescence
2107  *          0 If Xena is not quiescence
2108  */
2109 
2110 static int verify_xena_quiescence(struct s2io_nic *sp)
2111 {
2112 	int  mode;
2113 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2114 	u64 val64 = readq(&bar0->adapter_status);
2115 	mode = s2io_verify_pci_mode(sp);
2116 
2117 	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2118 		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2119 		return 0;
2120 	}
2121 	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2122 		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2123 		return 0;
2124 	}
2125 	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2126 		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2127 		return 0;
2128 	}
2129 	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2130 		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2131 		return 0;
2132 	}
2133 	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2134 		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2135 		return 0;
2136 	}
2137 	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2138 		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2139 		return 0;
2140 	}
2141 	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2142 		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2143 		return 0;
2144 	}
2145 	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2146 		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2147 		return 0;
2148 	}
2149 
2150 	/*
2151 	 * In PCI 33 mode, the P_PLL is not used, and therefore,
2152 	 * the the P_PLL_LOCK bit in the adapter_status register will
2153 	 * not be asserted.
2154 	 */
2155 	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2156 	    sp->device_type == XFRAME_II_DEVICE &&
2157 	    mode != PCI_MODE_PCI_33) {
2158 		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2159 		return 0;
2160 	}
2161 	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2162 	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2163 		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2164 		return 0;
2165 	}
2166 	return 1;
2167 }
2168 
2169 /**
2170  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2171  * @sp: Pointer to device specifc structure
2172  * Description :
2173  * New procedure to clear mac address reading  problems on Alpha platforms
2174  *
2175  */
2176 
2177 static void fix_mac_address(struct s2io_nic *sp)
2178 {
2179 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2180 	int i = 0;
2181 
2182 	while (fix_mac[i] != END_SIGN) {
2183 		writeq(fix_mac[i++], &bar0->gpio_control);
2184 		udelay(10);
2185 		(void) readq(&bar0->gpio_control);
2186 	}
2187 }
2188 
2189 /**
2190  *  start_nic - Turns the device on
2191  *  @nic : device private variable.
2192  *  Description:
2193  *  This function actually turns the device on. Before this  function is
2194  *  called,all Registers are configured from their reset states
2195  *  and shared memory is allocated but the NIC is still quiescent. On
2196  *  calling this function, the device interrupts are cleared and the NIC is
2197  *  literally switched on by writing into the adapter control register.
2198  *  Return Value:
2199  *  SUCCESS on success and -1 on failure.
2200  */
2201 
2202 static int start_nic(struct s2io_nic *nic)
2203 {
2204 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2205 	struct net_device *dev = nic->dev;
2206 	register u64 val64 = 0;
2207 	u16 subid, i;
2208 	struct config_param *config = &nic->config;
2209 	struct mac_info *mac_control = &nic->mac_control;
2210 
2211 	/*  PRC Initialization and configuration */
2212 	for (i = 0; i < config->rx_ring_num; i++) {
2213 		struct ring_info *ring = &mac_control->rings[i];
2214 
2215 		writeq((u64)ring->rx_blocks[0].block_dma_addr,
2216 		       &bar0->prc_rxd0_n[i]);
2217 
2218 		val64 = readq(&bar0->prc_ctrl_n[i]);
2219 		if (nic->rxd_mode == RXD_MODE_1)
2220 			val64 |= PRC_CTRL_RC_ENABLED;
2221 		else
2222 			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2223 		if (nic->device_type == XFRAME_II_DEVICE)
2224 			val64 |= PRC_CTRL_GROUP_READS;
2225 		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2226 		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2227 		writeq(val64, &bar0->prc_ctrl_n[i]);
2228 	}
2229 
2230 	if (nic->rxd_mode == RXD_MODE_3B) {
2231 		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2232 		val64 = readq(&bar0->rx_pa_cfg);
2233 		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2234 		writeq(val64, &bar0->rx_pa_cfg);
2235 	}
2236 
2237 	if (vlan_tag_strip == 0) {
2238 		val64 = readq(&bar0->rx_pa_cfg);
2239 		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2240 		writeq(val64, &bar0->rx_pa_cfg);
2241 		nic->vlan_strip_flag = 0;
2242 	}
2243 
2244 	/*
2245 	 * Enabling MC-RLDRAM. After enabling the device, we timeout
2246 	 * for around 100ms, which is approximately the time required
2247 	 * for the device to be ready for operation.
2248 	 */
2249 	val64 = readq(&bar0->mc_rldram_mrs);
2250 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2251 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2252 	val64 = readq(&bar0->mc_rldram_mrs);
2253 
2254 	msleep(100);	/* Delay by around 100 ms. */
2255 
2256 	/* Enabling ECC Protection. */
2257 	val64 = readq(&bar0->adapter_control);
2258 	val64 &= ~ADAPTER_ECC_EN;
2259 	writeq(val64, &bar0->adapter_control);
2260 
2261 	/*
2262 	 * Verify if the device is ready to be enabled, if so enable
2263 	 * it.
2264 	 */
2265 	val64 = readq(&bar0->adapter_status);
2266 	if (!verify_xena_quiescence(nic)) {
2267 		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2268 			  "Adapter status reads: 0x%llx\n",
2269 			  dev->name, (unsigned long long)val64);
2270 		return FAILURE;
2271 	}
2272 
2273 	/*
2274 	 * With some switches, link might be already up at this point.
2275 	 * Because of this weird behavior, when we enable laser,
2276 	 * we may not get link. We need to handle this. We cannot
2277 	 * figure out which switch is misbehaving. So we are forced to
2278 	 * make a global change.
2279 	 */
2280 
2281 	/* Enabling Laser. */
2282 	val64 = readq(&bar0->adapter_control);
2283 	val64 |= ADAPTER_EOI_TX_ON;
2284 	writeq(val64, &bar0->adapter_control);
2285 
2286 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2287 		/*
2288 		 * Dont see link state interrupts initially on some switches,
2289 		 * so directly scheduling the link state task here.
2290 		 */
2291 		schedule_work(&nic->set_link_task);
2292 	}
2293 	/* SXE-002: Initialize link and activity LED */
2294 	subid = nic->pdev->subsystem_device;
2295 	if (((subid & 0xFF) >= 0x07) &&
2296 	    (nic->device_type == XFRAME_I_DEVICE)) {
2297 		val64 = readq(&bar0->gpio_control);
2298 		val64 |= 0x0000800000000000ULL;
2299 		writeq(val64, &bar0->gpio_control);
2300 		val64 = 0x0411040400000000ULL;
2301 		writeq(val64, (void __iomem *)bar0 + 0x2700);
2302 	}
2303 
2304 	return SUCCESS;
2305 }
2306 /**
2307  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2308  */
2309 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2310 					struct TxD *txdlp, int get_off)
2311 {
2312 	struct s2io_nic *nic = fifo_data->nic;
2313 	struct sk_buff *skb;
2314 	struct TxD *txds;
2315 	u16 j, frg_cnt;
2316 
2317 	txds = txdlp;
2318 	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2319 		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2320 				 sizeof(u64), PCI_DMA_TODEVICE);
2321 		txds++;
2322 	}
2323 
2324 	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2325 	if (!skb) {
2326 		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2327 		return NULL;
2328 	}
2329 	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2330 			 skb_headlen(skb), PCI_DMA_TODEVICE);
2331 	frg_cnt = skb_shinfo(skb)->nr_frags;
2332 	if (frg_cnt) {
2333 		txds++;
2334 		for (j = 0; j < frg_cnt; j++, txds++) {
2335 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2336 			if (!txds->Buffer_Pointer)
2337 				break;
2338 			pci_unmap_page(nic->pdev,
2339 				       (dma_addr_t)txds->Buffer_Pointer,
2340 				       skb_frag_size(frag), PCI_DMA_TODEVICE);
2341 		}
2342 	}
2343 	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2344 	return skb;
2345 }
2346 
2347 /**
2348  *  free_tx_buffers - Free all queued Tx buffers
2349  *  @nic : device private variable.
2350  *  Description:
2351  *  Free all queued Tx buffers.
2352  *  Return Value: void
2353  */
2354 
2355 static void free_tx_buffers(struct s2io_nic *nic)
2356 {
2357 	struct net_device *dev = nic->dev;
2358 	struct sk_buff *skb;
2359 	struct TxD *txdp;
2360 	int i, j;
2361 	int cnt = 0;
2362 	struct config_param *config = &nic->config;
2363 	struct mac_info *mac_control = &nic->mac_control;
2364 	struct stat_block *stats = mac_control->stats_info;
2365 	struct swStat *swstats = &stats->sw_stat;
2366 
2367 	for (i = 0; i < config->tx_fifo_num; i++) {
2368 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2369 		struct fifo_info *fifo = &mac_control->fifos[i];
2370 		unsigned long flags;
2371 
2372 		spin_lock_irqsave(&fifo->tx_lock, flags);
2373 		for (j = 0; j < tx_cfg->fifo_len; j++) {
2374 			txdp = fifo->list_info[j].list_virt_addr;
2375 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2376 			if (skb) {
2377 				swstats->mem_freed += skb->truesize;
2378 				dev_kfree_skb(skb);
2379 				cnt++;
2380 			}
2381 		}
2382 		DBG_PRINT(INTR_DBG,
2383 			  "%s: forcibly freeing %d skbs on FIFO%d\n",
2384 			  dev->name, cnt, i);
2385 		fifo->tx_curr_get_info.offset = 0;
2386 		fifo->tx_curr_put_info.offset = 0;
2387 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
2388 	}
2389 }
2390 
2391 /**
2392  *   stop_nic -  To stop the nic
2393  *   @nic ; device private variable.
2394  *   Description:
2395  *   This function does exactly the opposite of what the start_nic()
2396  *   function does. This function is called to stop the device.
2397  *   Return Value:
2398  *   void.
2399  */
2400 
2401 static void stop_nic(struct s2io_nic *nic)
2402 {
2403 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2404 	register u64 val64 = 0;
2405 	u16 interruptible;
2406 
2407 	/*  Disable all interrupts */
2408 	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2409 	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2410 	interruptible |= TX_PIC_INTR;
2411 	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2412 
2413 	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2414 	val64 = readq(&bar0->adapter_control);
2415 	val64 &= ~(ADAPTER_CNTL_EN);
2416 	writeq(val64, &bar0->adapter_control);
2417 }
2418 
2419 /**
2420  *  fill_rx_buffers - Allocates the Rx side skbs
2421  *  @ring_info: per ring structure
2422  *  @from_card_up: If this is true, we will map the buffer to get
2423  *     the dma address for buf0 and buf1 to give it to the card.
2424  *     Else we will sync the already mapped buffer to give it to the card.
2425  *  Description:
2426  *  The function allocates Rx side skbs and puts the physical
2427  *  address of these buffers into the RxD buffer pointers, so that the NIC
2428  *  can DMA the received frame into these locations.
2429  *  The NIC supports 3 receive modes, viz
2430  *  1. single buffer,
2431  *  2. three buffer and
2432  *  3. Five buffer modes.
2433  *  Each mode defines how many fragments the received frame will be split
2434  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2435  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2436  *  is split into 3 fragments. As of now only single buffer mode is
2437  *  supported.
2438  *   Return Value:
2439  *  SUCCESS on success or an appropriate -ve value on failure.
2440  */
2441 static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2442 			   int from_card_up)
2443 {
2444 	struct sk_buff *skb;
2445 	struct RxD_t *rxdp;
2446 	int off, size, block_no, block_no1;
2447 	u32 alloc_tab = 0;
2448 	u32 alloc_cnt;
2449 	u64 tmp;
2450 	struct buffAdd *ba;
2451 	struct RxD_t *first_rxdp = NULL;
2452 	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2453 	struct RxD1 *rxdp1;
2454 	struct RxD3 *rxdp3;
2455 	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2456 
2457 	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2458 
2459 	block_no1 = ring->rx_curr_get_info.block_index;
2460 	while (alloc_tab < alloc_cnt) {
2461 		block_no = ring->rx_curr_put_info.block_index;
2462 
2463 		off = ring->rx_curr_put_info.offset;
2464 
2465 		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2466 
2467 		if ((block_no == block_no1) &&
2468 		    (off == ring->rx_curr_get_info.offset) &&
2469 		    (rxdp->Host_Control)) {
2470 			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2471 				  ring->dev->name);
2472 			goto end;
2473 		}
2474 		if (off && (off == ring->rxd_count)) {
2475 			ring->rx_curr_put_info.block_index++;
2476 			if (ring->rx_curr_put_info.block_index ==
2477 			    ring->block_count)
2478 				ring->rx_curr_put_info.block_index = 0;
2479 			block_no = ring->rx_curr_put_info.block_index;
2480 			off = 0;
2481 			ring->rx_curr_put_info.offset = off;
2482 			rxdp = ring->rx_blocks[block_no].block_virt_addr;
2483 			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2484 				  ring->dev->name, rxdp);
2485 
2486 		}
2487 
2488 		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2489 		    ((ring->rxd_mode == RXD_MODE_3B) &&
2490 		     (rxdp->Control_2 & s2BIT(0)))) {
2491 			ring->rx_curr_put_info.offset = off;
2492 			goto end;
2493 		}
2494 		/* calculate size of skb based on ring mode */
2495 		size = ring->mtu +
2496 			HEADER_ETHERNET_II_802_3_SIZE +
2497 			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2498 		if (ring->rxd_mode == RXD_MODE_1)
2499 			size += NET_IP_ALIGN;
2500 		else
2501 			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2502 
2503 		/* allocate skb */
2504 		skb = netdev_alloc_skb(nic->dev, size);
2505 		if (!skb) {
2506 			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2507 				  ring->dev->name);
2508 			if (first_rxdp) {
2509 				dma_wmb();
2510 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2511 			}
2512 			swstats->mem_alloc_fail_cnt++;
2513 
2514 			return -ENOMEM ;
2515 		}
2516 		swstats->mem_allocated += skb->truesize;
2517 
2518 		if (ring->rxd_mode == RXD_MODE_1) {
2519 			/* 1 buffer mode - normal operation mode */
2520 			rxdp1 = (struct RxD1 *)rxdp;
2521 			memset(rxdp, 0, sizeof(struct RxD1));
2522 			skb_reserve(skb, NET_IP_ALIGN);
2523 			rxdp1->Buffer0_ptr =
2524 				pci_map_single(ring->pdev, skb->data,
2525 					       size - NET_IP_ALIGN,
2526 					       PCI_DMA_FROMDEVICE);
2527 			if (pci_dma_mapping_error(nic->pdev,
2528 						  rxdp1->Buffer0_ptr))
2529 				goto pci_map_failed;
2530 
2531 			rxdp->Control_2 =
2532 				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2533 			rxdp->Host_Control = (unsigned long)skb;
2534 		} else if (ring->rxd_mode == RXD_MODE_3B) {
2535 			/*
2536 			 * 2 buffer mode -
2537 			 * 2 buffer mode provides 128
2538 			 * byte aligned receive buffers.
2539 			 */
2540 
2541 			rxdp3 = (struct RxD3 *)rxdp;
2542 			/* save buffer pointers to avoid frequent dma mapping */
2543 			Buffer0_ptr = rxdp3->Buffer0_ptr;
2544 			Buffer1_ptr = rxdp3->Buffer1_ptr;
2545 			memset(rxdp, 0, sizeof(struct RxD3));
2546 			/* restore the buffer pointers for dma sync*/
2547 			rxdp3->Buffer0_ptr = Buffer0_ptr;
2548 			rxdp3->Buffer1_ptr = Buffer1_ptr;
2549 
2550 			ba = &ring->ba[block_no][off];
2551 			skb_reserve(skb, BUF0_LEN);
2552 			tmp = (u64)(unsigned long)skb->data;
2553 			tmp += ALIGN_SIZE;
2554 			tmp &= ~ALIGN_SIZE;
2555 			skb->data = (void *) (unsigned long)tmp;
2556 			skb_reset_tail_pointer(skb);
2557 
2558 			if (from_card_up) {
2559 				rxdp3->Buffer0_ptr =
2560 					pci_map_single(ring->pdev, ba->ba_0,
2561 						       BUF0_LEN,
2562 						       PCI_DMA_FROMDEVICE);
2563 				if (pci_dma_mapping_error(nic->pdev,
2564 							  rxdp3->Buffer0_ptr))
2565 					goto pci_map_failed;
2566 			} else
2567 				pci_dma_sync_single_for_device(ring->pdev,
2568 							       (dma_addr_t)rxdp3->Buffer0_ptr,
2569 							       BUF0_LEN,
2570 							       PCI_DMA_FROMDEVICE);
2571 
2572 			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2573 			if (ring->rxd_mode == RXD_MODE_3B) {
2574 				/* Two buffer mode */
2575 
2576 				/*
2577 				 * Buffer2 will have L3/L4 header plus
2578 				 * L4 payload
2579 				 */
2580 				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
2581 								    skb->data,
2582 								    ring->mtu + 4,
2583 								    PCI_DMA_FROMDEVICE);
2584 
2585 				if (pci_dma_mapping_error(nic->pdev,
2586 							  rxdp3->Buffer2_ptr))
2587 					goto pci_map_failed;
2588 
2589 				if (from_card_up) {
2590 					rxdp3->Buffer1_ptr =
2591 						pci_map_single(ring->pdev,
2592 							       ba->ba_1,
2593 							       BUF1_LEN,
2594 							       PCI_DMA_FROMDEVICE);
2595 
2596 					if (pci_dma_mapping_error(nic->pdev,
2597 								  rxdp3->Buffer1_ptr)) {
2598 						pci_unmap_single(ring->pdev,
2599 								 (dma_addr_t)(unsigned long)
2600 								 skb->data,
2601 								 ring->mtu + 4,
2602 								 PCI_DMA_FROMDEVICE);
2603 						goto pci_map_failed;
2604 					}
2605 				}
2606 				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2607 				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2608 					(ring->mtu + 4);
2609 			}
2610 			rxdp->Control_2 |= s2BIT(0);
2611 			rxdp->Host_Control = (unsigned long) (skb);
2612 		}
2613 		if (alloc_tab & ((1 << rxsync_frequency) - 1))
2614 			rxdp->Control_1 |= RXD_OWN_XENA;
2615 		off++;
2616 		if (off == (ring->rxd_count + 1))
2617 			off = 0;
2618 		ring->rx_curr_put_info.offset = off;
2619 
2620 		rxdp->Control_2 |= SET_RXD_MARKER;
2621 		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2622 			if (first_rxdp) {
2623 				dma_wmb();
2624 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2625 			}
2626 			first_rxdp = rxdp;
2627 		}
2628 		ring->rx_bufs_left += 1;
2629 		alloc_tab++;
2630 	}
2631 
2632 end:
2633 	/* Transfer ownership of first descriptor to adapter just before
2634 	 * exiting. Before that, use memory barrier so that ownership
2635 	 * and other fields are seen by adapter correctly.
2636 	 */
2637 	if (first_rxdp) {
2638 		dma_wmb();
2639 		first_rxdp->Control_1 |= RXD_OWN_XENA;
2640 	}
2641 
2642 	return SUCCESS;
2643 
2644 pci_map_failed:
2645 	swstats->pci_map_fail_cnt++;
2646 	swstats->mem_freed += skb->truesize;
2647 	dev_kfree_skb_irq(skb);
2648 	return -ENOMEM;
2649 }
2650 
2651 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2652 {
2653 	struct net_device *dev = sp->dev;
2654 	int j;
2655 	struct sk_buff *skb;
2656 	struct RxD_t *rxdp;
2657 	struct RxD1 *rxdp1;
2658 	struct RxD3 *rxdp3;
2659 	struct mac_info *mac_control = &sp->mac_control;
2660 	struct stat_block *stats = mac_control->stats_info;
2661 	struct swStat *swstats = &stats->sw_stat;
2662 
2663 	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2664 		rxdp = mac_control->rings[ring_no].
2665 			rx_blocks[blk].rxds[j].virt_addr;
2666 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2667 		if (!skb)
2668 			continue;
2669 		if (sp->rxd_mode == RXD_MODE_1) {
2670 			rxdp1 = (struct RxD1 *)rxdp;
2671 			pci_unmap_single(sp->pdev,
2672 					 (dma_addr_t)rxdp1->Buffer0_ptr,
2673 					 dev->mtu +
2674 					 HEADER_ETHERNET_II_802_3_SIZE +
2675 					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2676 					 PCI_DMA_FROMDEVICE);
2677 			memset(rxdp, 0, sizeof(struct RxD1));
2678 		} else if (sp->rxd_mode == RXD_MODE_3B) {
2679 			rxdp3 = (struct RxD3 *)rxdp;
2680 			pci_unmap_single(sp->pdev,
2681 					 (dma_addr_t)rxdp3->Buffer0_ptr,
2682 					 BUF0_LEN,
2683 					 PCI_DMA_FROMDEVICE);
2684 			pci_unmap_single(sp->pdev,
2685 					 (dma_addr_t)rxdp3->Buffer1_ptr,
2686 					 BUF1_LEN,
2687 					 PCI_DMA_FROMDEVICE);
2688 			pci_unmap_single(sp->pdev,
2689 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2690 					 dev->mtu + 4,
2691 					 PCI_DMA_FROMDEVICE);
2692 			memset(rxdp, 0, sizeof(struct RxD3));
2693 		}
2694 		swstats->mem_freed += skb->truesize;
2695 		dev_kfree_skb(skb);
2696 		mac_control->rings[ring_no].rx_bufs_left -= 1;
2697 	}
2698 }
2699 
2700 /**
2701  *  free_rx_buffers - Frees all Rx buffers
2702  *  @sp: device private variable.
2703  *  Description:
2704  *  This function will free all Rx buffers allocated by host.
2705  *  Return Value:
2706  *  NONE.
2707  */
2708 
2709 static void free_rx_buffers(struct s2io_nic *sp)
2710 {
2711 	struct net_device *dev = sp->dev;
2712 	int i, blk = 0, buf_cnt = 0;
2713 	struct config_param *config = &sp->config;
2714 	struct mac_info *mac_control = &sp->mac_control;
2715 
2716 	for (i = 0; i < config->rx_ring_num; i++) {
2717 		struct ring_info *ring = &mac_control->rings[i];
2718 
2719 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2720 			free_rxd_blk(sp, i, blk);
2721 
2722 		ring->rx_curr_put_info.block_index = 0;
2723 		ring->rx_curr_get_info.block_index = 0;
2724 		ring->rx_curr_put_info.offset = 0;
2725 		ring->rx_curr_get_info.offset = 0;
2726 		ring->rx_bufs_left = 0;
2727 		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2728 			  dev->name, buf_cnt, i);
2729 	}
2730 }
2731 
2732 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2733 {
2734 	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2735 		DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2736 			  ring->dev->name);
2737 	}
2738 	return 0;
2739 }
2740 
2741 /**
2742  * s2io_poll - Rx interrupt handler for NAPI support
2743  * @napi : pointer to the napi structure.
2744  * @budget : The number of packets that were budgeted to be processed
2745  * during  one pass through the 'Poll" function.
2746  * Description:
2747  * Comes into picture only if NAPI support has been incorporated. It does
2748  * the same thing that rx_intr_handler does, but not in a interrupt context
2749  * also It will process only a given number of packets.
2750  * Return value:
2751  * 0 on success and 1 if there are No Rx packets to be processed.
2752  */
2753 
2754 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2755 {
2756 	struct ring_info *ring = container_of(napi, struct ring_info, napi);
2757 	struct net_device *dev = ring->dev;
2758 	int pkts_processed = 0;
2759 	u8 __iomem *addr = NULL;
2760 	u8 val8 = 0;
2761 	struct s2io_nic *nic = netdev_priv(dev);
2762 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2763 	int budget_org = budget;
2764 
2765 	if (unlikely(!is_s2io_card_up(nic)))
2766 		return 0;
2767 
2768 	pkts_processed = rx_intr_handler(ring, budget);
2769 	s2io_chk_rx_buffers(nic, ring);
2770 
2771 	if (pkts_processed < budget_org) {
2772 		napi_complete_done(napi, pkts_processed);
2773 		/*Re Enable MSI-Rx Vector*/
2774 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2775 		addr += 7 - ring->ring_no;
2776 		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2777 		writeb(val8, addr);
2778 		val8 = readb(addr);
2779 	}
2780 	return pkts_processed;
2781 }
2782 
2783 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2784 {
2785 	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2786 	int pkts_processed = 0;
2787 	int ring_pkts_processed, i;
2788 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2789 	int budget_org = budget;
2790 	struct config_param *config = &nic->config;
2791 	struct mac_info *mac_control = &nic->mac_control;
2792 
2793 	if (unlikely(!is_s2io_card_up(nic)))
2794 		return 0;
2795 
2796 	for (i = 0; i < config->rx_ring_num; i++) {
2797 		struct ring_info *ring = &mac_control->rings[i];
2798 		ring_pkts_processed = rx_intr_handler(ring, budget);
2799 		s2io_chk_rx_buffers(nic, ring);
2800 		pkts_processed += ring_pkts_processed;
2801 		budget -= ring_pkts_processed;
2802 		if (budget <= 0)
2803 			break;
2804 	}
2805 	if (pkts_processed < budget_org) {
2806 		napi_complete_done(napi, pkts_processed);
2807 		/* Re enable the Rx interrupts for the ring */
2808 		writeq(0, &bar0->rx_traffic_mask);
2809 		readl(&bar0->rx_traffic_mask);
2810 	}
2811 	return pkts_processed;
2812 }
2813 
2814 #ifdef CONFIG_NET_POLL_CONTROLLER
2815 /**
2816  * s2io_netpoll - netpoll event handler entry point
2817  * @dev : pointer to the device structure.
2818  * Description:
2819  * 	This function will be called by upper layer to check for events on the
2820  * interface in situations where interrupts are disabled. It is used for
2821  * specific in-kernel networking tasks, such as remote consoles and kernel
2822  * debugging over the network (example netdump in RedHat).
2823  */
2824 static void s2io_netpoll(struct net_device *dev)
2825 {
2826 	struct s2io_nic *nic = netdev_priv(dev);
2827 	const int irq = nic->pdev->irq;
2828 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2829 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2830 	int i;
2831 	struct config_param *config = &nic->config;
2832 	struct mac_info *mac_control = &nic->mac_control;
2833 
2834 	if (pci_channel_offline(nic->pdev))
2835 		return;
2836 
2837 	disable_irq(irq);
2838 
2839 	writeq(val64, &bar0->rx_traffic_int);
2840 	writeq(val64, &bar0->tx_traffic_int);
2841 
2842 	/* we need to free up the transmitted skbufs or else netpoll will
2843 	 * run out of skbs and will fail and eventually netpoll application such
2844 	 * as netdump will fail.
2845 	 */
2846 	for (i = 0; i < config->tx_fifo_num; i++)
2847 		tx_intr_handler(&mac_control->fifos[i]);
2848 
2849 	/* check for received packet and indicate up to network */
2850 	for (i = 0; i < config->rx_ring_num; i++) {
2851 		struct ring_info *ring = &mac_control->rings[i];
2852 
2853 		rx_intr_handler(ring, 0);
2854 	}
2855 
2856 	for (i = 0; i < config->rx_ring_num; i++) {
2857 		struct ring_info *ring = &mac_control->rings[i];
2858 
2859 		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2860 			DBG_PRINT(INFO_DBG,
2861 				  "%s: Out of memory in Rx Netpoll!!\n",
2862 				  dev->name);
2863 			break;
2864 		}
2865 	}
2866 	enable_irq(irq);
2867 }
2868 #endif
2869 
2870 /**
2871  *  rx_intr_handler - Rx interrupt handler
2872  *  @ring_info: per ring structure.
2873  *  @budget: budget for napi processing.
2874  *  Description:
2875  *  If the interrupt is because of a received frame or if the
2876  *  receive ring contains fresh as yet un-processed frames,this function is
2877  *  called. It picks out the RxD at which place the last Rx processing had
2878  *  stopped and sends the skb to the OSM's Rx handler and then increments
2879  *  the offset.
2880  *  Return Value:
2881  *  No. of napi packets processed.
2882  */
2883 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2884 {
2885 	int get_block, put_block;
2886 	struct rx_curr_get_info get_info, put_info;
2887 	struct RxD_t *rxdp;
2888 	struct sk_buff *skb;
2889 	int pkt_cnt = 0, napi_pkts = 0;
2890 	int i;
2891 	struct RxD1 *rxdp1;
2892 	struct RxD3 *rxdp3;
2893 
2894 	if (budget <= 0)
2895 		return napi_pkts;
2896 
2897 	get_info = ring_data->rx_curr_get_info;
2898 	get_block = get_info.block_index;
2899 	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2900 	put_block = put_info.block_index;
2901 	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2902 
2903 	while (RXD_IS_UP2DT(rxdp)) {
2904 		/*
2905 		 * If your are next to put index then it's
2906 		 * FIFO full condition
2907 		 */
2908 		if ((get_block == put_block) &&
2909 		    (get_info.offset + 1) == put_info.offset) {
2910 			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2911 				  ring_data->dev->name);
2912 			break;
2913 		}
2914 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2915 		if (skb == NULL) {
2916 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2917 				  ring_data->dev->name);
2918 			return 0;
2919 		}
2920 		if (ring_data->rxd_mode == RXD_MODE_1) {
2921 			rxdp1 = (struct RxD1 *)rxdp;
2922 			pci_unmap_single(ring_data->pdev, (dma_addr_t)
2923 					 rxdp1->Buffer0_ptr,
2924 					 ring_data->mtu +
2925 					 HEADER_ETHERNET_II_802_3_SIZE +
2926 					 HEADER_802_2_SIZE +
2927 					 HEADER_SNAP_SIZE,
2928 					 PCI_DMA_FROMDEVICE);
2929 		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
2930 			rxdp3 = (struct RxD3 *)rxdp;
2931 			pci_dma_sync_single_for_cpu(ring_data->pdev,
2932 						    (dma_addr_t)rxdp3->Buffer0_ptr,
2933 						    BUF0_LEN,
2934 						    PCI_DMA_FROMDEVICE);
2935 			pci_unmap_single(ring_data->pdev,
2936 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2937 					 ring_data->mtu + 4,
2938 					 PCI_DMA_FROMDEVICE);
2939 		}
2940 		prefetch(skb->data);
2941 		rx_osm_handler(ring_data, rxdp);
2942 		get_info.offset++;
2943 		ring_data->rx_curr_get_info.offset = get_info.offset;
2944 		rxdp = ring_data->rx_blocks[get_block].
2945 			rxds[get_info.offset].virt_addr;
2946 		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2947 			get_info.offset = 0;
2948 			ring_data->rx_curr_get_info.offset = get_info.offset;
2949 			get_block++;
2950 			if (get_block == ring_data->block_count)
2951 				get_block = 0;
2952 			ring_data->rx_curr_get_info.block_index = get_block;
2953 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2954 		}
2955 
2956 		if (ring_data->nic->config.napi) {
2957 			budget--;
2958 			napi_pkts++;
2959 			if (!budget)
2960 				break;
2961 		}
2962 		pkt_cnt++;
2963 		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2964 			break;
2965 	}
2966 	if (ring_data->lro) {
2967 		/* Clear all LRO sessions before exiting */
2968 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2969 			struct lro *lro = &ring_data->lro0_n[i];
2970 			if (lro->in_use) {
2971 				update_L3L4_header(ring_data->nic, lro);
2972 				queue_rx_frame(lro->parent, lro->vlan_tag);
2973 				clear_lro_session(lro);
2974 			}
2975 		}
2976 	}
2977 	return napi_pkts;
2978 }
2979 
2980 /**
2981  *  tx_intr_handler - Transmit interrupt handler
2982  *  @nic : device private variable
2983  *  Description:
2984  *  If an interrupt was raised to indicate DMA complete of the
2985  *  Tx packet, this function is called. It identifies the last TxD
2986  *  whose buffer was freed and frees all skbs whose data have already
2987  *  DMA'ed into the NICs internal memory.
2988  *  Return Value:
2989  *  NONE
2990  */
2991 
2992 static void tx_intr_handler(struct fifo_info *fifo_data)
2993 {
2994 	struct s2io_nic *nic = fifo_data->nic;
2995 	struct tx_curr_get_info get_info, put_info;
2996 	struct sk_buff *skb = NULL;
2997 	struct TxD *txdlp;
2998 	int pkt_cnt = 0;
2999 	unsigned long flags = 0;
3000 	u8 err_mask;
3001 	struct stat_block *stats = nic->mac_control.stats_info;
3002 	struct swStat *swstats = &stats->sw_stat;
3003 
3004 	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3005 		return;
3006 
3007 	get_info = fifo_data->tx_curr_get_info;
3008 	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3009 	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3010 	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3011 	       (get_info.offset != put_info.offset) &&
3012 	       (txdlp->Host_Control)) {
3013 		/* Check for TxD errors */
3014 		if (txdlp->Control_1 & TXD_T_CODE) {
3015 			unsigned long long err;
3016 			err = txdlp->Control_1 & TXD_T_CODE;
3017 			if (err & 0x1) {
3018 				swstats->parity_err_cnt++;
3019 			}
3020 
3021 			/* update t_code statistics */
3022 			err_mask = err >> 48;
3023 			switch (err_mask) {
3024 			case 2:
3025 				swstats->tx_buf_abort_cnt++;
3026 				break;
3027 
3028 			case 3:
3029 				swstats->tx_desc_abort_cnt++;
3030 				break;
3031 
3032 			case 7:
3033 				swstats->tx_parity_err_cnt++;
3034 				break;
3035 
3036 			case 10:
3037 				swstats->tx_link_loss_cnt++;
3038 				break;
3039 
3040 			case 15:
3041 				swstats->tx_list_proc_err_cnt++;
3042 				break;
3043 			}
3044 		}
3045 
3046 		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3047 		if (skb == NULL) {
3048 			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3049 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3050 				  __func__);
3051 			return;
3052 		}
3053 		pkt_cnt++;
3054 
3055 		/* Updating the statistics block */
3056 		swstats->mem_freed += skb->truesize;
3057 		dev_consume_skb_irq(skb);
3058 
3059 		get_info.offset++;
3060 		if (get_info.offset == get_info.fifo_len + 1)
3061 			get_info.offset = 0;
3062 		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3063 		fifo_data->tx_curr_get_info.offset = get_info.offset;
3064 	}
3065 
3066 	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3067 
3068 	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3069 }
3070 
3071 /**
3072  *  s2io_mdio_write - Function to write in to MDIO registers
3073  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3074  *  @addr     : address value
3075  *  @value    : data value
3076  *  @dev      : pointer to net_device structure
3077  *  Description:
3078  *  This function is used to write values to the MDIO registers
3079  *  NONE
3080  */
3081 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3082 			    struct net_device *dev)
3083 {
3084 	u64 val64;
3085 	struct s2io_nic *sp = netdev_priv(dev);
3086 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3087 
3088 	/* address transaction */
3089 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3090 		MDIO_MMD_DEV_ADDR(mmd_type) |
3091 		MDIO_MMS_PRT_ADDR(0x0);
3092 	writeq(val64, &bar0->mdio_control);
3093 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3094 	writeq(val64, &bar0->mdio_control);
3095 	udelay(100);
3096 
3097 	/* Data transaction */
3098 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3099 		MDIO_MMD_DEV_ADDR(mmd_type) |
3100 		MDIO_MMS_PRT_ADDR(0x0) |
3101 		MDIO_MDIO_DATA(value) |
3102 		MDIO_OP(MDIO_OP_WRITE_TRANS);
3103 	writeq(val64, &bar0->mdio_control);
3104 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3105 	writeq(val64, &bar0->mdio_control);
3106 	udelay(100);
3107 
3108 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3109 		MDIO_MMD_DEV_ADDR(mmd_type) |
3110 		MDIO_MMS_PRT_ADDR(0x0) |
3111 		MDIO_OP(MDIO_OP_READ_TRANS);
3112 	writeq(val64, &bar0->mdio_control);
3113 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3114 	writeq(val64, &bar0->mdio_control);
3115 	udelay(100);
3116 }
3117 
3118 /**
3119  *  s2io_mdio_read - Function to write in to MDIO registers
3120  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3121  *  @addr     : address value
3122  *  @dev      : pointer to net_device structure
3123  *  Description:
3124  *  This function is used to read values to the MDIO registers
3125  *  NONE
3126  */
3127 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3128 {
3129 	u64 val64 = 0x0;
3130 	u64 rval64 = 0x0;
3131 	struct s2io_nic *sp = netdev_priv(dev);
3132 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3133 
3134 	/* address transaction */
3135 	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3136 			 | MDIO_MMD_DEV_ADDR(mmd_type)
3137 			 | MDIO_MMS_PRT_ADDR(0x0));
3138 	writeq(val64, &bar0->mdio_control);
3139 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3140 	writeq(val64, &bar0->mdio_control);
3141 	udelay(100);
3142 
3143 	/* Data transaction */
3144 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3145 		MDIO_MMD_DEV_ADDR(mmd_type) |
3146 		MDIO_MMS_PRT_ADDR(0x0) |
3147 		MDIO_OP(MDIO_OP_READ_TRANS);
3148 	writeq(val64, &bar0->mdio_control);
3149 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3150 	writeq(val64, &bar0->mdio_control);
3151 	udelay(100);
3152 
3153 	/* Read the value from regs */
3154 	rval64 = readq(&bar0->mdio_control);
3155 	rval64 = rval64 & 0xFFFF0000;
3156 	rval64 = rval64 >> 16;
3157 	return rval64;
3158 }
3159 
3160 /**
3161  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3162  *  @counter      : counter value to be updated
3163  *  @flag         : flag to indicate the status
3164  *  @type         : counter type
3165  *  Description:
3166  *  This function is to check the status of the xpak counters value
3167  *  NONE
3168  */
3169 
3170 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3171 				  u16 flag, u16 type)
3172 {
3173 	u64 mask = 0x3;
3174 	u64 val64;
3175 	int i;
3176 	for (i = 0; i < index; i++)
3177 		mask = mask << 0x2;
3178 
3179 	if (flag > 0) {
3180 		*counter = *counter + 1;
3181 		val64 = *regs_stat & mask;
3182 		val64 = val64 >> (index * 0x2);
3183 		val64 = val64 + 1;
3184 		if (val64 == 3) {
3185 			switch (type) {
3186 			case 1:
3187 				DBG_PRINT(ERR_DBG,
3188 					  "Take Xframe NIC out of service.\n");
3189 				DBG_PRINT(ERR_DBG,
3190 "Excessive temperatures may result in premature transceiver failure.\n");
3191 				break;
3192 			case 2:
3193 				DBG_PRINT(ERR_DBG,
3194 					  "Take Xframe NIC out of service.\n");
3195 				DBG_PRINT(ERR_DBG,
3196 "Excessive bias currents may indicate imminent laser diode failure.\n");
3197 				break;
3198 			case 3:
3199 				DBG_PRINT(ERR_DBG,
3200 					  "Take Xframe NIC out of service.\n");
3201 				DBG_PRINT(ERR_DBG,
3202 "Excessive laser output power may saturate far-end receiver.\n");
3203 				break;
3204 			default:
3205 				DBG_PRINT(ERR_DBG,
3206 					  "Incorrect XPAK Alarm type\n");
3207 			}
3208 			val64 = 0x0;
3209 		}
3210 		val64 = val64 << (index * 0x2);
3211 		*regs_stat = (*regs_stat & (~mask)) | (val64);
3212 
3213 	} else {
3214 		*regs_stat = *regs_stat & (~mask);
3215 	}
3216 }
3217 
3218 /**
3219  *  s2io_updt_xpak_counter - Function to update the xpak counters
3220  *  @dev         : pointer to net_device struct
3221  *  Description:
3222  *  This function is to upate the status of the xpak counters value
3223  *  NONE
3224  */
3225 static void s2io_updt_xpak_counter(struct net_device *dev)
3226 {
3227 	u16 flag  = 0x0;
3228 	u16 type  = 0x0;
3229 	u16 val16 = 0x0;
3230 	u64 val64 = 0x0;
3231 	u64 addr  = 0x0;
3232 
3233 	struct s2io_nic *sp = netdev_priv(dev);
3234 	struct stat_block *stats = sp->mac_control.stats_info;
3235 	struct xpakStat *xstats = &stats->xpak_stat;
3236 
3237 	/* Check the communication with the MDIO slave */
3238 	addr = MDIO_CTRL1;
3239 	val64 = 0x0;
3240 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3241 	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3242 		DBG_PRINT(ERR_DBG,
3243 			  "ERR: MDIO slave access failed - Returned %llx\n",
3244 			  (unsigned long long)val64);
3245 		return;
3246 	}
3247 
3248 	/* Check for the expected value of control reg 1 */
3249 	if (val64 != MDIO_CTRL1_SPEED10G) {
3250 		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3251 			  "Returned: %llx- Expected: 0x%x\n",
3252 			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3253 		return;
3254 	}
3255 
3256 	/* Loading the DOM register to MDIO register */
3257 	addr = 0xA100;
3258 	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3259 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3260 
3261 	/* Reading the Alarm flags */
3262 	addr = 0xA070;
3263 	val64 = 0x0;
3264 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3265 
3266 	flag = CHECKBIT(val64, 0x7);
3267 	type = 1;
3268 	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3269 			      &xstats->xpak_regs_stat,
3270 			      0x0, flag, type);
3271 
3272 	if (CHECKBIT(val64, 0x6))
3273 		xstats->alarm_transceiver_temp_low++;
3274 
3275 	flag = CHECKBIT(val64, 0x3);
3276 	type = 2;
3277 	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3278 			      &xstats->xpak_regs_stat,
3279 			      0x2, flag, type);
3280 
3281 	if (CHECKBIT(val64, 0x2))
3282 		xstats->alarm_laser_bias_current_low++;
3283 
3284 	flag = CHECKBIT(val64, 0x1);
3285 	type = 3;
3286 	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3287 			      &xstats->xpak_regs_stat,
3288 			      0x4, flag, type);
3289 
3290 	if (CHECKBIT(val64, 0x0))
3291 		xstats->alarm_laser_output_power_low++;
3292 
3293 	/* Reading the Warning flags */
3294 	addr = 0xA074;
3295 	val64 = 0x0;
3296 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3297 
3298 	if (CHECKBIT(val64, 0x7))
3299 		xstats->warn_transceiver_temp_high++;
3300 
3301 	if (CHECKBIT(val64, 0x6))
3302 		xstats->warn_transceiver_temp_low++;
3303 
3304 	if (CHECKBIT(val64, 0x3))
3305 		xstats->warn_laser_bias_current_high++;
3306 
3307 	if (CHECKBIT(val64, 0x2))
3308 		xstats->warn_laser_bias_current_low++;
3309 
3310 	if (CHECKBIT(val64, 0x1))
3311 		xstats->warn_laser_output_power_high++;
3312 
3313 	if (CHECKBIT(val64, 0x0))
3314 		xstats->warn_laser_output_power_low++;
3315 }
3316 
3317 /**
3318  *  wait_for_cmd_complete - waits for a command to complete.
3319  *  @sp : private member of the device structure, which is a pointer to the
3320  *  s2io_nic structure.
3321  *  Description: Function that waits for a command to Write into RMAC
3322  *  ADDR DATA registers to be completed and returns either success or
3323  *  error depending on whether the command was complete or not.
3324  *  Return value:
3325  *   SUCCESS on success and FAILURE on failure.
3326  */
3327 
3328 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3329 				 int bit_state)
3330 {
3331 	int ret = FAILURE, cnt = 0, delay = 1;
3332 	u64 val64;
3333 
3334 	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3335 		return FAILURE;
3336 
3337 	do {
3338 		val64 = readq(addr);
3339 		if (bit_state == S2IO_BIT_RESET) {
3340 			if (!(val64 & busy_bit)) {
3341 				ret = SUCCESS;
3342 				break;
3343 			}
3344 		} else {
3345 			if (val64 & busy_bit) {
3346 				ret = SUCCESS;
3347 				break;
3348 			}
3349 		}
3350 
3351 		if (in_interrupt())
3352 			mdelay(delay);
3353 		else
3354 			msleep(delay);
3355 
3356 		if (++cnt >= 10)
3357 			delay = 50;
3358 	} while (cnt < 20);
3359 	return ret;
3360 }
3361 /**
3362  * check_pci_device_id - Checks if the device id is supported
3363  * @id : device id
3364  * Description: Function to check if the pci device id is supported by driver.
3365  * Return value: Actual device id if supported else PCI_ANY_ID
3366  */
3367 static u16 check_pci_device_id(u16 id)
3368 {
3369 	switch (id) {
3370 	case PCI_DEVICE_ID_HERC_WIN:
3371 	case PCI_DEVICE_ID_HERC_UNI:
3372 		return XFRAME_II_DEVICE;
3373 	case PCI_DEVICE_ID_S2IO_UNI:
3374 	case PCI_DEVICE_ID_S2IO_WIN:
3375 		return XFRAME_I_DEVICE;
3376 	default:
3377 		return PCI_ANY_ID;
3378 	}
3379 }
3380 
3381 /**
3382  *  s2io_reset - Resets the card.
3383  *  @sp : private member of the device structure.
3384  *  Description: Function to Reset the card. This function then also
3385  *  restores the previously saved PCI configuration space registers as
3386  *  the card reset also resets the configuration space.
3387  *  Return value:
3388  *  void.
3389  */
3390 
3391 static void s2io_reset(struct s2io_nic *sp)
3392 {
3393 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3394 	u64 val64;
3395 	u16 subid, pci_cmd;
3396 	int i;
3397 	u16 val16;
3398 	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3399 	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3400 	struct stat_block *stats;
3401 	struct swStat *swstats;
3402 
3403 	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3404 		  __func__, pci_name(sp->pdev));
3405 
3406 	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3407 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3408 
3409 	val64 = SW_RESET_ALL;
3410 	writeq(val64, &bar0->sw_reset);
3411 	if (strstr(sp->product_name, "CX4"))
3412 		msleep(750);
3413 	msleep(250);
3414 	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3415 
3416 		/* Restore the PCI state saved during initialization. */
3417 		pci_restore_state(sp->pdev);
3418 		pci_save_state(sp->pdev);
3419 		pci_read_config_word(sp->pdev, 0x2, &val16);
3420 		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3421 			break;
3422 		msleep(200);
3423 	}
3424 
3425 	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3426 		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3427 
3428 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3429 
3430 	s2io_init_pci(sp);
3431 
3432 	/* Set swapper to enable I/O register access */
3433 	s2io_set_swapper(sp);
3434 
3435 	/* restore mac_addr entries */
3436 	do_s2io_restore_unicast_mc(sp);
3437 
3438 	/* Restore the MSIX table entries from local variables */
3439 	restore_xmsi_data(sp);
3440 
3441 	/* Clear certain PCI/PCI-X fields after reset */
3442 	if (sp->device_type == XFRAME_II_DEVICE) {
3443 		/* Clear "detected parity error" bit */
3444 		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3445 
3446 		/* Clearing PCIX Ecc status register */
3447 		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3448 
3449 		/* Clearing PCI_STATUS error reflected here */
3450 		writeq(s2BIT(62), &bar0->txpic_int_reg);
3451 	}
3452 
3453 	/* Reset device statistics maintained by OS */
3454 	memset(&sp->stats, 0, sizeof(struct net_device_stats));
3455 
3456 	stats = sp->mac_control.stats_info;
3457 	swstats = &stats->sw_stat;
3458 
3459 	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3460 	up_cnt = swstats->link_up_cnt;
3461 	down_cnt = swstats->link_down_cnt;
3462 	up_time = swstats->link_up_time;
3463 	down_time = swstats->link_down_time;
3464 	reset_cnt = swstats->soft_reset_cnt;
3465 	mem_alloc_cnt = swstats->mem_allocated;
3466 	mem_free_cnt = swstats->mem_freed;
3467 	watchdog_cnt = swstats->watchdog_timer_cnt;
3468 
3469 	memset(stats, 0, sizeof(struct stat_block));
3470 
3471 	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3472 	swstats->link_up_cnt = up_cnt;
3473 	swstats->link_down_cnt = down_cnt;
3474 	swstats->link_up_time = up_time;
3475 	swstats->link_down_time = down_time;
3476 	swstats->soft_reset_cnt = reset_cnt;
3477 	swstats->mem_allocated = mem_alloc_cnt;
3478 	swstats->mem_freed = mem_free_cnt;
3479 	swstats->watchdog_timer_cnt = watchdog_cnt;
3480 
3481 	/* SXE-002: Configure link and activity LED to turn it off */
3482 	subid = sp->pdev->subsystem_device;
3483 	if (((subid & 0xFF) >= 0x07) &&
3484 	    (sp->device_type == XFRAME_I_DEVICE)) {
3485 		val64 = readq(&bar0->gpio_control);
3486 		val64 |= 0x0000800000000000ULL;
3487 		writeq(val64, &bar0->gpio_control);
3488 		val64 = 0x0411040400000000ULL;
3489 		writeq(val64, (void __iomem *)bar0 + 0x2700);
3490 	}
3491 
3492 	/*
3493 	 * Clear spurious ECC interrupts that would have occurred on
3494 	 * XFRAME II cards after reset.
3495 	 */
3496 	if (sp->device_type == XFRAME_II_DEVICE) {
3497 		val64 = readq(&bar0->pcc_err_reg);
3498 		writeq(val64, &bar0->pcc_err_reg);
3499 	}
3500 
3501 	sp->device_enabled_once = false;
3502 }
3503 
3504 /**
3505  *  s2io_set_swapper - to set the swapper controle on the card
3506  *  @sp : private member of the device structure,
3507  *  pointer to the s2io_nic structure.
3508  *  Description: Function to set the swapper control on the card
3509  *  correctly depending on the 'endianness' of the system.
3510  *  Return value:
3511  *  SUCCESS on success and FAILURE on failure.
3512  */
3513 
3514 static int s2io_set_swapper(struct s2io_nic *sp)
3515 {
3516 	struct net_device *dev = sp->dev;
3517 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3518 	u64 val64, valt, valr;
3519 
3520 	/*
3521 	 * Set proper endian settings and verify the same by reading
3522 	 * the PIF Feed-back register.
3523 	 */
3524 
3525 	val64 = readq(&bar0->pif_rd_swapper_fb);
3526 	if (val64 != 0x0123456789ABCDEFULL) {
3527 		int i = 0;
3528 		static const u64 value[] = {
3529 			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
3530 			0x8100008181000081ULL,	/* FE=1, SE=0 */
3531 			0x4200004242000042ULL,	/* FE=0, SE=1 */
3532 			0			/* FE=0, SE=0 */
3533 		};
3534 
3535 		while (i < 4) {
3536 			writeq(value[i], &bar0->swapper_ctrl);
3537 			val64 = readq(&bar0->pif_rd_swapper_fb);
3538 			if (val64 == 0x0123456789ABCDEFULL)
3539 				break;
3540 			i++;
3541 		}
3542 		if (i == 4) {
3543 			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3544 				  "feedback read %llx\n",
3545 				  dev->name, (unsigned long long)val64);
3546 			return FAILURE;
3547 		}
3548 		valr = value[i];
3549 	} else {
3550 		valr = readq(&bar0->swapper_ctrl);
3551 	}
3552 
3553 	valt = 0x0123456789ABCDEFULL;
3554 	writeq(valt, &bar0->xmsi_address);
3555 	val64 = readq(&bar0->xmsi_address);
3556 
3557 	if (val64 != valt) {
3558 		int i = 0;
3559 		static const u64 value[] = {
3560 			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
3561 			0x0081810000818100ULL,	/* FE=1, SE=0 */
3562 			0x0042420000424200ULL,	/* FE=0, SE=1 */
3563 			0			/* FE=0, SE=0 */
3564 		};
3565 
3566 		while (i < 4) {
3567 			writeq((value[i] | valr), &bar0->swapper_ctrl);
3568 			writeq(valt, &bar0->xmsi_address);
3569 			val64 = readq(&bar0->xmsi_address);
3570 			if (val64 == valt)
3571 				break;
3572 			i++;
3573 		}
3574 		if (i == 4) {
3575 			unsigned long long x = val64;
3576 			DBG_PRINT(ERR_DBG,
3577 				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
3578 			return FAILURE;
3579 		}
3580 	}
3581 	val64 = readq(&bar0->swapper_ctrl);
3582 	val64 &= 0xFFFF000000000000ULL;
3583 
3584 #ifdef __BIG_ENDIAN
3585 	/*
3586 	 * The device by default set to a big endian format, so a
3587 	 * big endian driver need not set anything.
3588 	 */
3589 	val64 |= (SWAPPER_CTRL_TXP_FE |
3590 		  SWAPPER_CTRL_TXP_SE |
3591 		  SWAPPER_CTRL_TXD_R_FE |
3592 		  SWAPPER_CTRL_TXD_W_FE |
3593 		  SWAPPER_CTRL_TXF_R_FE |
3594 		  SWAPPER_CTRL_RXD_R_FE |
3595 		  SWAPPER_CTRL_RXD_W_FE |
3596 		  SWAPPER_CTRL_RXF_W_FE |
3597 		  SWAPPER_CTRL_XMSI_FE |
3598 		  SWAPPER_CTRL_STATS_FE |
3599 		  SWAPPER_CTRL_STATS_SE);
3600 	if (sp->config.intr_type == INTA)
3601 		val64 |= SWAPPER_CTRL_XMSI_SE;
3602 	writeq(val64, &bar0->swapper_ctrl);
3603 #else
3604 	/*
3605 	 * Initially we enable all bits to make it accessible by the
3606 	 * driver, then we selectively enable only those bits that
3607 	 * we want to set.
3608 	 */
3609 	val64 |= (SWAPPER_CTRL_TXP_FE |
3610 		  SWAPPER_CTRL_TXP_SE |
3611 		  SWAPPER_CTRL_TXD_R_FE |
3612 		  SWAPPER_CTRL_TXD_R_SE |
3613 		  SWAPPER_CTRL_TXD_W_FE |
3614 		  SWAPPER_CTRL_TXD_W_SE |
3615 		  SWAPPER_CTRL_TXF_R_FE |
3616 		  SWAPPER_CTRL_RXD_R_FE |
3617 		  SWAPPER_CTRL_RXD_R_SE |
3618 		  SWAPPER_CTRL_RXD_W_FE |
3619 		  SWAPPER_CTRL_RXD_W_SE |
3620 		  SWAPPER_CTRL_RXF_W_FE |
3621 		  SWAPPER_CTRL_XMSI_FE |
3622 		  SWAPPER_CTRL_STATS_FE |
3623 		  SWAPPER_CTRL_STATS_SE);
3624 	if (sp->config.intr_type == INTA)
3625 		val64 |= SWAPPER_CTRL_XMSI_SE;
3626 	writeq(val64, &bar0->swapper_ctrl);
3627 #endif
3628 	val64 = readq(&bar0->swapper_ctrl);
3629 
3630 	/*
3631 	 * Verifying if endian settings are accurate by reading a
3632 	 * feedback register.
3633 	 */
3634 	val64 = readq(&bar0->pif_rd_swapper_fb);
3635 	if (val64 != 0x0123456789ABCDEFULL) {
3636 		/* Endian settings are incorrect, calls for another dekko. */
3637 		DBG_PRINT(ERR_DBG,
3638 			  "%s: Endian settings are wrong, feedback read %llx\n",
3639 			  dev->name, (unsigned long long)val64);
3640 		return FAILURE;
3641 	}
3642 
3643 	return SUCCESS;
3644 }
3645 
3646 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3647 {
3648 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3649 	u64 val64;
3650 	int ret = 0, cnt = 0;
3651 
3652 	do {
3653 		val64 = readq(&bar0->xmsi_access);
3654 		if (!(val64 & s2BIT(15)))
3655 			break;
3656 		mdelay(1);
3657 		cnt++;
3658 	} while (cnt < 5);
3659 	if (cnt == 5) {
3660 		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3661 		ret = 1;
3662 	}
3663 
3664 	return ret;
3665 }
3666 
3667 static void restore_xmsi_data(struct s2io_nic *nic)
3668 {
3669 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3670 	u64 val64;
3671 	int i, msix_index;
3672 
3673 	if (nic->device_type == XFRAME_I_DEVICE)
3674 		return;
3675 
3676 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3677 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3678 		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3679 		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3680 		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3681 		writeq(val64, &bar0->xmsi_access);
3682 		if (wait_for_msix_trans(nic, msix_index))
3683 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3684 				  __func__, msix_index);
3685 	}
3686 }
3687 
3688 static void store_xmsi_data(struct s2io_nic *nic)
3689 {
3690 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3691 	u64 val64, addr, data;
3692 	int i, msix_index;
3693 
3694 	if (nic->device_type == XFRAME_I_DEVICE)
3695 		return;
3696 
3697 	/* Store and display */
3698 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3699 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3700 		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3701 		writeq(val64, &bar0->xmsi_access);
3702 		if (wait_for_msix_trans(nic, msix_index)) {
3703 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3704 				  __func__, msix_index);
3705 			continue;
3706 		}
3707 		addr = readq(&bar0->xmsi_address);
3708 		data = readq(&bar0->xmsi_data);
3709 		if (addr && data) {
3710 			nic->msix_info[i].addr = addr;
3711 			nic->msix_info[i].data = data;
3712 		}
3713 	}
3714 }
3715 
3716 static int s2io_enable_msi_x(struct s2io_nic *nic)
3717 {
3718 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3719 	u64 rx_mat;
3720 	u16 msi_control; /* Temp variable */
3721 	int ret, i, j, msix_indx = 1;
3722 	int size;
3723 	struct stat_block *stats = nic->mac_control.stats_info;
3724 	struct swStat *swstats = &stats->sw_stat;
3725 
3726 	size = nic->num_entries * sizeof(struct msix_entry);
3727 	nic->entries = kzalloc(size, GFP_KERNEL);
3728 	if (!nic->entries) {
3729 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3730 			  __func__);
3731 		swstats->mem_alloc_fail_cnt++;
3732 		return -ENOMEM;
3733 	}
3734 	swstats->mem_allocated += size;
3735 
3736 	size = nic->num_entries * sizeof(struct s2io_msix_entry);
3737 	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3738 	if (!nic->s2io_entries) {
3739 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3740 			  __func__);
3741 		swstats->mem_alloc_fail_cnt++;
3742 		kfree(nic->entries);
3743 		swstats->mem_freed
3744 			+= (nic->num_entries * sizeof(struct msix_entry));
3745 		return -ENOMEM;
3746 	}
3747 	swstats->mem_allocated += size;
3748 
3749 	nic->entries[0].entry = 0;
3750 	nic->s2io_entries[0].entry = 0;
3751 	nic->s2io_entries[0].in_use = MSIX_FLG;
3752 	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3753 	nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3754 
3755 	for (i = 1; i < nic->num_entries; i++) {
3756 		nic->entries[i].entry = ((i - 1) * 8) + 1;
3757 		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3758 		nic->s2io_entries[i].arg = NULL;
3759 		nic->s2io_entries[i].in_use = 0;
3760 	}
3761 
3762 	rx_mat = readq(&bar0->rx_mat);
3763 	for (j = 0; j < nic->config.rx_ring_num; j++) {
3764 		rx_mat |= RX_MAT_SET(j, msix_indx);
3765 		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3766 		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3767 		nic->s2io_entries[j+1].in_use = MSIX_FLG;
3768 		msix_indx += 8;
3769 	}
3770 	writeq(rx_mat, &bar0->rx_mat);
3771 	readq(&bar0->rx_mat);
3772 
3773 	ret = pci_enable_msix_range(nic->pdev, nic->entries,
3774 				    nic->num_entries, nic->num_entries);
3775 	/* We fail init if error or we get less vectors than min required */
3776 	if (ret < 0) {
3777 		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3778 		kfree(nic->entries);
3779 		swstats->mem_freed += nic->num_entries *
3780 			sizeof(struct msix_entry);
3781 		kfree(nic->s2io_entries);
3782 		swstats->mem_freed += nic->num_entries *
3783 			sizeof(struct s2io_msix_entry);
3784 		nic->entries = NULL;
3785 		nic->s2io_entries = NULL;
3786 		return -ENOMEM;
3787 	}
3788 
3789 	/*
3790 	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3791 	 * in the herc NIC. (Temp change, needs to be removed later)
3792 	 */
3793 	pci_read_config_word(nic->pdev, 0x42, &msi_control);
3794 	msi_control |= 0x1; /* Enable MSI */
3795 	pci_write_config_word(nic->pdev, 0x42, msi_control);
3796 
3797 	return 0;
3798 }
3799 
3800 /* Handle software interrupt used during MSI(X) test */
3801 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3802 {
3803 	struct s2io_nic *sp = dev_id;
3804 
3805 	sp->msi_detected = 1;
3806 	wake_up(&sp->msi_wait);
3807 
3808 	return IRQ_HANDLED;
3809 }
3810 
3811 /* Test interrupt path by forcing a a software IRQ */
3812 static int s2io_test_msi(struct s2io_nic *sp)
3813 {
3814 	struct pci_dev *pdev = sp->pdev;
3815 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3816 	int err;
3817 	u64 val64, saved64;
3818 
3819 	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3820 			  sp->name, sp);
3821 	if (err) {
3822 		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3823 			  sp->dev->name, pci_name(pdev), pdev->irq);
3824 		return err;
3825 	}
3826 
3827 	init_waitqueue_head(&sp->msi_wait);
3828 	sp->msi_detected = 0;
3829 
3830 	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3831 	val64 |= SCHED_INT_CTRL_ONE_SHOT;
3832 	val64 |= SCHED_INT_CTRL_TIMER_EN;
3833 	val64 |= SCHED_INT_CTRL_INT2MSI(1);
3834 	writeq(val64, &bar0->scheduled_int_ctrl);
3835 
3836 	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3837 
3838 	if (!sp->msi_detected) {
3839 		/* MSI(X) test failed, go back to INTx mode */
3840 		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3841 			  "using MSI(X) during test\n",
3842 			  sp->dev->name, pci_name(pdev));
3843 
3844 		err = -EOPNOTSUPP;
3845 	}
3846 
3847 	free_irq(sp->entries[1].vector, sp);
3848 
3849 	writeq(saved64, &bar0->scheduled_int_ctrl);
3850 
3851 	return err;
3852 }
3853 
3854 static void remove_msix_isr(struct s2io_nic *sp)
3855 {
3856 	int i;
3857 	u16 msi_control;
3858 
3859 	for (i = 0; i < sp->num_entries; i++) {
3860 		if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3861 			int vector = sp->entries[i].vector;
3862 			void *arg = sp->s2io_entries[i].arg;
3863 			free_irq(vector, arg);
3864 		}
3865 	}
3866 
3867 	kfree(sp->entries);
3868 	kfree(sp->s2io_entries);
3869 	sp->entries = NULL;
3870 	sp->s2io_entries = NULL;
3871 
3872 	pci_read_config_word(sp->pdev, 0x42, &msi_control);
3873 	msi_control &= 0xFFFE; /* Disable MSI */
3874 	pci_write_config_word(sp->pdev, 0x42, msi_control);
3875 
3876 	pci_disable_msix(sp->pdev);
3877 }
3878 
3879 static void remove_inta_isr(struct s2io_nic *sp)
3880 {
3881 	free_irq(sp->pdev->irq, sp->dev);
3882 }
3883 
3884 /* ********************************************************* *
3885  * Functions defined below concern the OS part of the driver *
3886  * ********************************************************* */
3887 
3888 /**
3889  *  s2io_open - open entry point of the driver
3890  *  @dev : pointer to the device structure.
3891  *  Description:
3892  *  This function is the open entry point of the driver. It mainly calls a
3893  *  function to allocate Rx buffers and inserts them into the buffer
3894  *  descriptors and then enables the Rx part of the NIC.
3895  *  Return value:
3896  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3897  *   file on failure.
3898  */
3899 
3900 static int s2io_open(struct net_device *dev)
3901 {
3902 	struct s2io_nic *sp = netdev_priv(dev);
3903 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3904 	int err = 0;
3905 
3906 	/*
3907 	 * Make sure you have link off by default every time
3908 	 * Nic is initialized
3909 	 */
3910 	netif_carrier_off(dev);
3911 	sp->last_link_state = 0;
3912 
3913 	/* Initialize H/W and enable interrupts */
3914 	err = s2io_card_up(sp);
3915 	if (err) {
3916 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3917 			  dev->name);
3918 		goto hw_init_failed;
3919 	}
3920 
3921 	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3922 		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3923 		s2io_card_down(sp);
3924 		err = -ENODEV;
3925 		goto hw_init_failed;
3926 	}
3927 	s2io_start_all_tx_queue(sp);
3928 	return 0;
3929 
3930 hw_init_failed:
3931 	if (sp->config.intr_type == MSI_X) {
3932 		if (sp->entries) {
3933 			kfree(sp->entries);
3934 			swstats->mem_freed += sp->num_entries *
3935 				sizeof(struct msix_entry);
3936 		}
3937 		if (sp->s2io_entries) {
3938 			kfree(sp->s2io_entries);
3939 			swstats->mem_freed += sp->num_entries *
3940 				sizeof(struct s2io_msix_entry);
3941 		}
3942 	}
3943 	return err;
3944 }
3945 
3946 /**
3947  *  s2io_close -close entry point of the driver
3948  *  @dev : device pointer.
3949  *  Description:
3950  *  This is the stop entry point of the driver. It needs to undo exactly
3951  *  whatever was done by the open entry point,thus it's usually referred to
3952  *  as the close function.Among other things this function mainly stops the
3953  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3954  *  Return value:
3955  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3956  *  file on failure.
3957  */
3958 
3959 static int s2io_close(struct net_device *dev)
3960 {
3961 	struct s2io_nic *sp = netdev_priv(dev);
3962 	struct config_param *config = &sp->config;
3963 	u64 tmp64;
3964 	int offset;
3965 
3966 	/* Return if the device is already closed               *
3967 	 *  Can happen when s2io_card_up failed in change_mtu    *
3968 	 */
3969 	if (!is_s2io_card_up(sp))
3970 		return 0;
3971 
3972 	s2io_stop_all_tx_queue(sp);
3973 	/* delete all populated mac entries */
3974 	for (offset = 1; offset < config->max_mc_addr; offset++) {
3975 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
3976 		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3977 			do_s2io_delete_unicast_mc(sp, tmp64);
3978 	}
3979 
3980 	s2io_card_down(sp);
3981 
3982 	return 0;
3983 }
3984 
3985 /**
3986  *  s2io_xmit - Tx entry point of te driver
3987  *  @skb : the socket buffer containing the Tx data.
3988  *  @dev : device pointer.
3989  *  Description :
3990  *  This function is the Tx entry point of the driver. S2IO NIC supports
3991  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
3992  *  NOTE: when device can't queue the pkt,just the trans_start variable will
3993  *  not be upadted.
3994  *  Return value:
3995  *  0 on success & 1 on failure.
3996  */
3997 
3998 static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3999 {
4000 	struct s2io_nic *sp = netdev_priv(dev);
4001 	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4002 	register u64 val64;
4003 	struct TxD *txdp;
4004 	struct TxFIFO_element __iomem *tx_fifo;
4005 	unsigned long flags = 0;
4006 	u16 vlan_tag = 0;
4007 	struct fifo_info *fifo = NULL;
4008 	int offload_type;
4009 	int enable_per_list_interrupt = 0;
4010 	struct config_param *config = &sp->config;
4011 	struct mac_info *mac_control = &sp->mac_control;
4012 	struct stat_block *stats = mac_control->stats_info;
4013 	struct swStat *swstats = &stats->sw_stat;
4014 
4015 	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4016 
4017 	if (unlikely(skb->len <= 0)) {
4018 		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4019 		dev_kfree_skb_any(skb);
4020 		return NETDEV_TX_OK;
4021 	}
4022 
4023 	if (!is_s2io_card_up(sp)) {
4024 		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4025 			  dev->name);
4026 		dev_kfree_skb_any(skb);
4027 		return NETDEV_TX_OK;
4028 	}
4029 
4030 	queue = 0;
4031 	if (skb_vlan_tag_present(skb))
4032 		vlan_tag = skb_vlan_tag_get(skb);
4033 	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4034 		if (skb->protocol == htons(ETH_P_IP)) {
4035 			struct iphdr *ip;
4036 			struct tcphdr *th;
4037 			ip = ip_hdr(skb);
4038 
4039 			if (!ip_is_fragment(ip)) {
4040 				th = (struct tcphdr *)(((unsigned char *)ip) +
4041 						       ip->ihl*4);
4042 
4043 				if (ip->protocol == IPPROTO_TCP) {
4044 					queue_len = sp->total_tcp_fifos;
4045 					queue = (ntohs(th->source) +
4046 						 ntohs(th->dest)) &
4047 						sp->fifo_selector[queue_len - 1];
4048 					if (queue >= queue_len)
4049 						queue = queue_len - 1;
4050 				} else if (ip->protocol == IPPROTO_UDP) {
4051 					queue_len = sp->total_udp_fifos;
4052 					queue = (ntohs(th->source) +
4053 						 ntohs(th->dest)) &
4054 						sp->fifo_selector[queue_len - 1];
4055 					if (queue >= queue_len)
4056 						queue = queue_len - 1;
4057 					queue += sp->udp_fifo_idx;
4058 					if (skb->len > 1024)
4059 						enable_per_list_interrupt = 1;
4060 				}
4061 			}
4062 		}
4063 	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4064 		/* get fifo number based on skb->priority value */
4065 		queue = config->fifo_mapping
4066 			[skb->priority & (MAX_TX_FIFOS - 1)];
4067 	fifo = &mac_control->fifos[queue];
4068 
4069 	spin_lock_irqsave(&fifo->tx_lock, flags);
4070 
4071 	if (sp->config.multiq) {
4072 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4073 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4074 			return NETDEV_TX_BUSY;
4075 		}
4076 	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4077 		if (netif_queue_stopped(dev)) {
4078 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4079 			return NETDEV_TX_BUSY;
4080 		}
4081 	}
4082 
4083 	put_off = (u16)fifo->tx_curr_put_info.offset;
4084 	get_off = (u16)fifo->tx_curr_get_info.offset;
4085 	txdp = fifo->list_info[put_off].list_virt_addr;
4086 
4087 	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4088 	/* Avoid "put" pointer going beyond "get" pointer */
4089 	if (txdp->Host_Control ||
4090 	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4091 		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4092 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4093 		dev_kfree_skb_any(skb);
4094 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
4095 		return NETDEV_TX_OK;
4096 	}
4097 
4098 	offload_type = s2io_offload_type(skb);
4099 	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4100 		txdp->Control_1 |= TXD_TCP_LSO_EN;
4101 		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4102 	}
4103 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
4104 		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4105 				    TXD_TX_CKO_TCP_EN |
4106 				    TXD_TX_CKO_UDP_EN);
4107 	}
4108 	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4109 	txdp->Control_1 |= TXD_LIST_OWN_XENA;
4110 	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4111 	if (enable_per_list_interrupt)
4112 		if (put_off & (queue_len >> 5))
4113 			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4114 	if (vlan_tag) {
4115 		txdp->Control_2 |= TXD_VLAN_ENABLE;
4116 		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4117 	}
4118 
4119 	frg_len = skb_headlen(skb);
4120 	txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4121 					      frg_len, PCI_DMA_TODEVICE);
4122 	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4123 		goto pci_map_failed;
4124 
4125 	txdp->Host_Control = (unsigned long)skb;
4126 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4127 
4128 	frg_cnt = skb_shinfo(skb)->nr_frags;
4129 	/* For fragmented SKB. */
4130 	for (i = 0; i < frg_cnt; i++) {
4131 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4132 		/* A '0' length fragment will be ignored */
4133 		if (!skb_frag_size(frag))
4134 			continue;
4135 		txdp++;
4136 		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4137 							     frag, 0,
4138 							     skb_frag_size(frag),
4139 							     DMA_TO_DEVICE);
4140 		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4141 	}
4142 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4143 
4144 	tx_fifo = mac_control->tx_FIFO_start[queue];
4145 	val64 = fifo->list_info[put_off].list_phy_addr;
4146 	writeq(val64, &tx_fifo->TxDL_Pointer);
4147 
4148 	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4149 		 TX_FIFO_LAST_LIST);
4150 	if (offload_type)
4151 		val64 |= TX_FIFO_SPECIAL_FUNC;
4152 
4153 	writeq(val64, &tx_fifo->List_Control);
4154 
4155 	put_off++;
4156 	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4157 		put_off = 0;
4158 	fifo->tx_curr_put_info.offset = put_off;
4159 
4160 	/* Avoid "put" pointer going beyond "get" pointer */
4161 	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4162 		swstats->fifo_full_cnt++;
4163 		DBG_PRINT(TX_DBG,
4164 			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4165 			  put_off, get_off);
4166 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4167 	}
4168 	swstats->mem_allocated += skb->truesize;
4169 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4170 
4171 	if (sp->config.intr_type == MSI_X)
4172 		tx_intr_handler(fifo);
4173 
4174 	return NETDEV_TX_OK;
4175 
4176 pci_map_failed:
4177 	swstats->pci_map_fail_cnt++;
4178 	s2io_stop_tx_queue(sp, fifo->fifo_no);
4179 	swstats->mem_freed += skb->truesize;
4180 	dev_kfree_skb_any(skb);
4181 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4182 	return NETDEV_TX_OK;
4183 }
4184 
4185 static void
4186 s2io_alarm_handle(struct timer_list *t)
4187 {
4188 	struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
4189 	struct net_device *dev = sp->dev;
4190 
4191 	s2io_handle_errors(dev);
4192 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4193 }
4194 
4195 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4196 {
4197 	struct ring_info *ring = (struct ring_info *)dev_id;
4198 	struct s2io_nic *sp = ring->nic;
4199 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4200 
4201 	if (unlikely(!is_s2io_card_up(sp)))
4202 		return IRQ_HANDLED;
4203 
4204 	if (sp->config.napi) {
4205 		u8 __iomem *addr = NULL;
4206 		u8 val8 = 0;
4207 
4208 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4209 		addr += (7 - ring->ring_no);
4210 		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4211 		writeb(val8, addr);
4212 		val8 = readb(addr);
4213 		napi_schedule(&ring->napi);
4214 	} else {
4215 		rx_intr_handler(ring, 0);
4216 		s2io_chk_rx_buffers(sp, ring);
4217 	}
4218 
4219 	return IRQ_HANDLED;
4220 }
4221 
4222 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4223 {
4224 	int i;
4225 	struct fifo_info *fifos = (struct fifo_info *)dev_id;
4226 	struct s2io_nic *sp = fifos->nic;
4227 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4228 	struct config_param *config  = &sp->config;
4229 	u64 reason;
4230 
4231 	if (unlikely(!is_s2io_card_up(sp)))
4232 		return IRQ_NONE;
4233 
4234 	reason = readq(&bar0->general_int_status);
4235 	if (unlikely(reason == S2IO_MINUS_ONE))
4236 		/* Nothing much can be done. Get out */
4237 		return IRQ_HANDLED;
4238 
4239 	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4240 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4241 
4242 		if (reason & GEN_INTR_TXPIC)
4243 			s2io_txpic_intr_handle(sp);
4244 
4245 		if (reason & GEN_INTR_TXTRAFFIC)
4246 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4247 
4248 		for (i = 0; i < config->tx_fifo_num; i++)
4249 			tx_intr_handler(&fifos[i]);
4250 
4251 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4252 		readl(&bar0->general_int_status);
4253 		return IRQ_HANDLED;
4254 	}
4255 	/* The interrupt was not raised by us */
4256 	return IRQ_NONE;
4257 }
4258 
4259 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4260 {
4261 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4262 	u64 val64;
4263 
4264 	val64 = readq(&bar0->pic_int_status);
4265 	if (val64 & PIC_INT_GPIO) {
4266 		val64 = readq(&bar0->gpio_int_reg);
4267 		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4268 		    (val64 & GPIO_INT_REG_LINK_UP)) {
4269 			/*
4270 			 * This is unstable state so clear both up/down
4271 			 * interrupt and adapter to re-evaluate the link state.
4272 			 */
4273 			val64 |= GPIO_INT_REG_LINK_DOWN;
4274 			val64 |= GPIO_INT_REG_LINK_UP;
4275 			writeq(val64, &bar0->gpio_int_reg);
4276 			val64 = readq(&bar0->gpio_int_mask);
4277 			val64 &= ~(GPIO_INT_MASK_LINK_UP |
4278 				   GPIO_INT_MASK_LINK_DOWN);
4279 			writeq(val64, &bar0->gpio_int_mask);
4280 		} else if (val64 & GPIO_INT_REG_LINK_UP) {
4281 			val64 = readq(&bar0->adapter_status);
4282 			/* Enable Adapter */
4283 			val64 = readq(&bar0->adapter_control);
4284 			val64 |= ADAPTER_CNTL_EN;
4285 			writeq(val64, &bar0->adapter_control);
4286 			val64 |= ADAPTER_LED_ON;
4287 			writeq(val64, &bar0->adapter_control);
4288 			if (!sp->device_enabled_once)
4289 				sp->device_enabled_once = 1;
4290 
4291 			s2io_link(sp, LINK_UP);
4292 			/*
4293 			 * unmask link down interrupt and mask link-up
4294 			 * intr
4295 			 */
4296 			val64 = readq(&bar0->gpio_int_mask);
4297 			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4298 			val64 |= GPIO_INT_MASK_LINK_UP;
4299 			writeq(val64, &bar0->gpio_int_mask);
4300 
4301 		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4302 			val64 = readq(&bar0->adapter_status);
4303 			s2io_link(sp, LINK_DOWN);
4304 			/* Link is down so unmaks link up interrupt */
4305 			val64 = readq(&bar0->gpio_int_mask);
4306 			val64 &= ~GPIO_INT_MASK_LINK_UP;
4307 			val64 |= GPIO_INT_MASK_LINK_DOWN;
4308 			writeq(val64, &bar0->gpio_int_mask);
4309 
4310 			/* turn off LED */
4311 			val64 = readq(&bar0->adapter_control);
4312 			val64 = val64 & (~ADAPTER_LED_ON);
4313 			writeq(val64, &bar0->adapter_control);
4314 		}
4315 	}
4316 	val64 = readq(&bar0->gpio_int_mask);
4317 }
4318 
4319 /**
4320  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4321  *  @value: alarm bits
4322  *  @addr: address value
4323  *  @cnt: counter variable
4324  *  Description: Check for alarm and increment the counter
4325  *  Return Value:
4326  *  1 - if alarm bit set
4327  *  0 - if alarm bit is not set
4328  */
4329 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4330 				 unsigned long long *cnt)
4331 {
4332 	u64 val64;
4333 	val64 = readq(addr);
4334 	if (val64 & value) {
4335 		writeq(val64, addr);
4336 		(*cnt)++;
4337 		return 1;
4338 	}
4339 	return 0;
4340 
4341 }
4342 
4343 /**
4344  *  s2io_handle_errors - Xframe error indication handler
4345  *  @nic: device private variable
4346  *  Description: Handle alarms such as loss of link, single or
4347  *  double ECC errors, critical and serious errors.
4348  *  Return Value:
4349  *  NONE
4350  */
4351 static void s2io_handle_errors(void *dev_id)
4352 {
4353 	struct net_device *dev = (struct net_device *)dev_id;
4354 	struct s2io_nic *sp = netdev_priv(dev);
4355 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4356 	u64 temp64 = 0, val64 = 0;
4357 	int i = 0;
4358 
4359 	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4360 	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4361 
4362 	if (!is_s2io_card_up(sp))
4363 		return;
4364 
4365 	if (pci_channel_offline(sp->pdev))
4366 		return;
4367 
4368 	memset(&sw_stat->ring_full_cnt, 0,
4369 	       sizeof(sw_stat->ring_full_cnt));
4370 
4371 	/* Handling the XPAK counters update */
4372 	if (stats->xpak_timer_count < 72000) {
4373 		/* waiting for an hour */
4374 		stats->xpak_timer_count++;
4375 	} else {
4376 		s2io_updt_xpak_counter(dev);
4377 		/* reset the count to zero */
4378 		stats->xpak_timer_count = 0;
4379 	}
4380 
4381 	/* Handling link status change error Intr */
4382 	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4383 		val64 = readq(&bar0->mac_rmac_err_reg);
4384 		writeq(val64, &bar0->mac_rmac_err_reg);
4385 		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4386 			schedule_work(&sp->set_link_task);
4387 	}
4388 
4389 	/* In case of a serious error, the device will be Reset. */
4390 	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4391 				  &sw_stat->serious_err_cnt))
4392 		goto reset;
4393 
4394 	/* Check for data parity error */
4395 	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4396 				  &sw_stat->parity_err_cnt))
4397 		goto reset;
4398 
4399 	/* Check for ring full counter */
4400 	if (sp->device_type == XFRAME_II_DEVICE) {
4401 		val64 = readq(&bar0->ring_bump_counter1);
4402 		for (i = 0; i < 4; i++) {
4403 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4404 			temp64 >>= 64 - ((i+1)*16);
4405 			sw_stat->ring_full_cnt[i] += temp64;
4406 		}
4407 
4408 		val64 = readq(&bar0->ring_bump_counter2);
4409 		for (i = 0; i < 4; i++) {
4410 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4411 			temp64 >>= 64 - ((i+1)*16);
4412 			sw_stat->ring_full_cnt[i+4] += temp64;
4413 		}
4414 	}
4415 
4416 	val64 = readq(&bar0->txdma_int_status);
4417 	/*check for pfc_err*/
4418 	if (val64 & TXDMA_PFC_INT) {
4419 		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4420 					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4421 					  PFC_PCIX_ERR,
4422 					  &bar0->pfc_err_reg,
4423 					  &sw_stat->pfc_err_cnt))
4424 			goto reset;
4425 		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4426 				      &bar0->pfc_err_reg,
4427 				      &sw_stat->pfc_err_cnt);
4428 	}
4429 
4430 	/*check for tda_err*/
4431 	if (val64 & TXDMA_TDA_INT) {
4432 		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4433 					  TDA_SM0_ERR_ALARM |
4434 					  TDA_SM1_ERR_ALARM,
4435 					  &bar0->tda_err_reg,
4436 					  &sw_stat->tda_err_cnt))
4437 			goto reset;
4438 		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4439 				      &bar0->tda_err_reg,
4440 				      &sw_stat->tda_err_cnt);
4441 	}
4442 	/*check for pcc_err*/
4443 	if (val64 & TXDMA_PCC_INT) {
4444 		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4445 					  PCC_N_SERR | PCC_6_COF_OV_ERR |
4446 					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4447 					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4448 					  PCC_TXB_ECC_DB_ERR,
4449 					  &bar0->pcc_err_reg,
4450 					  &sw_stat->pcc_err_cnt))
4451 			goto reset;
4452 		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4453 				      &bar0->pcc_err_reg,
4454 				      &sw_stat->pcc_err_cnt);
4455 	}
4456 
4457 	/*check for tti_err*/
4458 	if (val64 & TXDMA_TTI_INT) {
4459 		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4460 					  &bar0->tti_err_reg,
4461 					  &sw_stat->tti_err_cnt))
4462 			goto reset;
4463 		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4464 				      &bar0->tti_err_reg,
4465 				      &sw_stat->tti_err_cnt);
4466 	}
4467 
4468 	/*check for lso_err*/
4469 	if (val64 & TXDMA_LSO_INT) {
4470 		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4471 					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4472 					  &bar0->lso_err_reg,
4473 					  &sw_stat->lso_err_cnt))
4474 			goto reset;
4475 		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4476 				      &bar0->lso_err_reg,
4477 				      &sw_stat->lso_err_cnt);
4478 	}
4479 
4480 	/*check for tpa_err*/
4481 	if (val64 & TXDMA_TPA_INT) {
4482 		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4483 					  &bar0->tpa_err_reg,
4484 					  &sw_stat->tpa_err_cnt))
4485 			goto reset;
4486 		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4487 				      &bar0->tpa_err_reg,
4488 				      &sw_stat->tpa_err_cnt);
4489 	}
4490 
4491 	/*check for sm_err*/
4492 	if (val64 & TXDMA_SM_INT) {
4493 		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4494 					  &bar0->sm_err_reg,
4495 					  &sw_stat->sm_err_cnt))
4496 			goto reset;
4497 	}
4498 
4499 	val64 = readq(&bar0->mac_int_status);
4500 	if (val64 & MAC_INT_STATUS_TMAC_INT) {
4501 		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4502 					  &bar0->mac_tmac_err_reg,
4503 					  &sw_stat->mac_tmac_err_cnt))
4504 			goto reset;
4505 		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4506 				      TMAC_DESC_ECC_SG_ERR |
4507 				      TMAC_DESC_ECC_DB_ERR,
4508 				      &bar0->mac_tmac_err_reg,
4509 				      &sw_stat->mac_tmac_err_cnt);
4510 	}
4511 
4512 	val64 = readq(&bar0->xgxs_int_status);
4513 	if (val64 & XGXS_INT_STATUS_TXGXS) {
4514 		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4515 					  &bar0->xgxs_txgxs_err_reg,
4516 					  &sw_stat->xgxs_txgxs_err_cnt))
4517 			goto reset;
4518 		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4519 				      &bar0->xgxs_txgxs_err_reg,
4520 				      &sw_stat->xgxs_txgxs_err_cnt);
4521 	}
4522 
4523 	val64 = readq(&bar0->rxdma_int_status);
4524 	if (val64 & RXDMA_INT_RC_INT_M) {
4525 		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4526 					  RC_FTC_ECC_DB_ERR |
4527 					  RC_PRCn_SM_ERR_ALARM |
4528 					  RC_FTC_SM_ERR_ALARM,
4529 					  &bar0->rc_err_reg,
4530 					  &sw_stat->rc_err_cnt))
4531 			goto reset;
4532 		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4533 				      RC_FTC_ECC_SG_ERR |
4534 				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4535 				      &sw_stat->rc_err_cnt);
4536 		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4537 					  PRC_PCI_AB_WR_Rn |
4538 					  PRC_PCI_AB_F_WR_Rn,
4539 					  &bar0->prc_pcix_err_reg,
4540 					  &sw_stat->prc_pcix_err_cnt))
4541 			goto reset;
4542 		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4543 				      PRC_PCI_DP_WR_Rn |
4544 				      PRC_PCI_DP_F_WR_Rn,
4545 				      &bar0->prc_pcix_err_reg,
4546 				      &sw_stat->prc_pcix_err_cnt);
4547 	}
4548 
4549 	if (val64 & RXDMA_INT_RPA_INT_M) {
4550 		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4551 					  &bar0->rpa_err_reg,
4552 					  &sw_stat->rpa_err_cnt))
4553 			goto reset;
4554 		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4555 				      &bar0->rpa_err_reg,
4556 				      &sw_stat->rpa_err_cnt);
4557 	}
4558 
4559 	if (val64 & RXDMA_INT_RDA_INT_M) {
4560 		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4561 					  RDA_FRM_ECC_DB_N_AERR |
4562 					  RDA_SM1_ERR_ALARM |
4563 					  RDA_SM0_ERR_ALARM |
4564 					  RDA_RXD_ECC_DB_SERR,
4565 					  &bar0->rda_err_reg,
4566 					  &sw_stat->rda_err_cnt))
4567 			goto reset;
4568 		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4569 				      RDA_FRM_ECC_SG_ERR |
4570 				      RDA_MISC_ERR |
4571 				      RDA_PCIX_ERR,
4572 				      &bar0->rda_err_reg,
4573 				      &sw_stat->rda_err_cnt);
4574 	}
4575 
4576 	if (val64 & RXDMA_INT_RTI_INT_M) {
4577 		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4578 					  &bar0->rti_err_reg,
4579 					  &sw_stat->rti_err_cnt))
4580 			goto reset;
4581 		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4582 				      &bar0->rti_err_reg,
4583 				      &sw_stat->rti_err_cnt);
4584 	}
4585 
4586 	val64 = readq(&bar0->mac_int_status);
4587 	if (val64 & MAC_INT_STATUS_RMAC_INT) {
4588 		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4589 					  &bar0->mac_rmac_err_reg,
4590 					  &sw_stat->mac_rmac_err_cnt))
4591 			goto reset;
4592 		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4593 				      RMAC_SINGLE_ECC_ERR |
4594 				      RMAC_DOUBLE_ECC_ERR,
4595 				      &bar0->mac_rmac_err_reg,
4596 				      &sw_stat->mac_rmac_err_cnt);
4597 	}
4598 
4599 	val64 = readq(&bar0->xgxs_int_status);
4600 	if (val64 & XGXS_INT_STATUS_RXGXS) {
4601 		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4602 					  &bar0->xgxs_rxgxs_err_reg,
4603 					  &sw_stat->xgxs_rxgxs_err_cnt))
4604 			goto reset;
4605 	}
4606 
4607 	val64 = readq(&bar0->mc_int_status);
4608 	if (val64 & MC_INT_STATUS_MC_INT) {
4609 		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4610 					  &bar0->mc_err_reg,
4611 					  &sw_stat->mc_err_cnt))
4612 			goto reset;
4613 
4614 		/* Handling Ecc errors */
4615 		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4616 			writeq(val64, &bar0->mc_err_reg);
4617 			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4618 				sw_stat->double_ecc_errs++;
4619 				if (sp->device_type != XFRAME_II_DEVICE) {
4620 					/*
4621 					 * Reset XframeI only if critical error
4622 					 */
4623 					if (val64 &
4624 					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4625 					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4626 						goto reset;
4627 				}
4628 			} else
4629 				sw_stat->single_ecc_errs++;
4630 		}
4631 	}
4632 	return;
4633 
4634 reset:
4635 	s2io_stop_all_tx_queue(sp);
4636 	schedule_work(&sp->rst_timer_task);
4637 	sw_stat->soft_reset_cnt++;
4638 }
4639 
4640 /**
4641  *  s2io_isr - ISR handler of the device .
4642  *  @irq: the irq of the device.
4643  *  @dev_id: a void pointer to the dev structure of the NIC.
4644  *  Description:  This function is the ISR handler of the device. It
4645  *  identifies the reason for the interrupt and calls the relevant
4646  *  service routines. As a contongency measure, this ISR allocates the
4647  *  recv buffers, if their numbers are below the panic value which is
4648  *  presently set to 25% of the original number of rcv buffers allocated.
4649  *  Return value:
4650  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4651  *   IRQ_NONE: will be returned if interrupt is not from our device
4652  */
4653 static irqreturn_t s2io_isr(int irq, void *dev_id)
4654 {
4655 	struct net_device *dev = (struct net_device *)dev_id;
4656 	struct s2io_nic *sp = netdev_priv(dev);
4657 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4658 	int i;
4659 	u64 reason = 0;
4660 	struct mac_info *mac_control;
4661 	struct config_param *config;
4662 
4663 	/* Pretend we handled any irq's from a disconnected card */
4664 	if (pci_channel_offline(sp->pdev))
4665 		return IRQ_NONE;
4666 
4667 	if (!is_s2io_card_up(sp))
4668 		return IRQ_NONE;
4669 
4670 	config = &sp->config;
4671 	mac_control = &sp->mac_control;
4672 
4673 	/*
4674 	 * Identify the cause for interrupt and call the appropriate
4675 	 * interrupt handler. Causes for the interrupt could be;
4676 	 * 1. Rx of packet.
4677 	 * 2. Tx complete.
4678 	 * 3. Link down.
4679 	 */
4680 	reason = readq(&bar0->general_int_status);
4681 
4682 	if (unlikely(reason == S2IO_MINUS_ONE))
4683 		return IRQ_HANDLED;	/* Nothing much can be done. Get out */
4684 
4685 	if (reason &
4686 	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4687 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4688 
4689 		if (config->napi) {
4690 			if (reason & GEN_INTR_RXTRAFFIC) {
4691 				napi_schedule(&sp->napi);
4692 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4693 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4694 				readl(&bar0->rx_traffic_int);
4695 			}
4696 		} else {
4697 			/*
4698 			 * rx_traffic_int reg is an R1 register, writing all 1's
4699 			 * will ensure that the actual interrupt causing bit
4700 			 * get's cleared and hence a read can be avoided.
4701 			 */
4702 			if (reason & GEN_INTR_RXTRAFFIC)
4703 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4704 
4705 			for (i = 0; i < config->rx_ring_num; i++) {
4706 				struct ring_info *ring = &mac_control->rings[i];
4707 
4708 				rx_intr_handler(ring, 0);
4709 			}
4710 		}
4711 
4712 		/*
4713 		 * tx_traffic_int reg is an R1 register, writing all 1's
4714 		 * will ensure that the actual interrupt causing bit get's
4715 		 * cleared and hence a read can be avoided.
4716 		 */
4717 		if (reason & GEN_INTR_TXTRAFFIC)
4718 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4719 
4720 		for (i = 0; i < config->tx_fifo_num; i++)
4721 			tx_intr_handler(&mac_control->fifos[i]);
4722 
4723 		if (reason & GEN_INTR_TXPIC)
4724 			s2io_txpic_intr_handle(sp);
4725 
4726 		/*
4727 		 * Reallocate the buffers from the interrupt handler itself.
4728 		 */
4729 		if (!config->napi) {
4730 			for (i = 0; i < config->rx_ring_num; i++) {
4731 				struct ring_info *ring = &mac_control->rings[i];
4732 
4733 				s2io_chk_rx_buffers(sp, ring);
4734 			}
4735 		}
4736 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4737 		readl(&bar0->general_int_status);
4738 
4739 		return IRQ_HANDLED;
4740 
4741 	} else if (!reason) {
4742 		/* The interrupt was not raised by us */
4743 		return IRQ_NONE;
4744 	}
4745 
4746 	return IRQ_HANDLED;
4747 }
4748 
4749 /**
4750  * s2io_updt_stats -
4751  */
4752 static void s2io_updt_stats(struct s2io_nic *sp)
4753 {
4754 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4755 	u64 val64;
4756 	int cnt = 0;
4757 
4758 	if (is_s2io_card_up(sp)) {
4759 		/* Apprx 30us on a 133 MHz bus */
4760 		val64 = SET_UPDT_CLICKS(10) |
4761 			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4762 		writeq(val64, &bar0->stat_cfg);
4763 		do {
4764 			udelay(100);
4765 			val64 = readq(&bar0->stat_cfg);
4766 			if (!(val64 & s2BIT(0)))
4767 				break;
4768 			cnt++;
4769 			if (cnt == 5)
4770 				break; /* Updt failed */
4771 		} while (1);
4772 	}
4773 }
4774 
4775 /**
4776  *  s2io_get_stats - Updates the device statistics structure.
4777  *  @dev : pointer to the device structure.
4778  *  Description:
4779  *  This function updates the device statistics structure in the s2io_nic
4780  *  structure and returns a pointer to the same.
4781  *  Return value:
4782  *  pointer to the updated net_device_stats structure.
4783  */
4784 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4785 {
4786 	struct s2io_nic *sp = netdev_priv(dev);
4787 	struct mac_info *mac_control = &sp->mac_control;
4788 	struct stat_block *stats = mac_control->stats_info;
4789 	u64 delta;
4790 
4791 	/* Configure Stats for immediate updt */
4792 	s2io_updt_stats(sp);
4793 
4794 	/* A device reset will cause the on-adapter statistics to be zero'ed.
4795 	 * This can be done while running by changing the MTU.  To prevent the
4796 	 * system from having the stats zero'ed, the driver keeps a copy of the
4797 	 * last update to the system (which is also zero'ed on reset).  This
4798 	 * enables the driver to accurately know the delta between the last
4799 	 * update and the current update.
4800 	 */
4801 	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4802 		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4803 	sp->stats.rx_packets += delta;
4804 	dev->stats.rx_packets += delta;
4805 
4806 	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4807 		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4808 	sp->stats.tx_packets += delta;
4809 	dev->stats.tx_packets += delta;
4810 
4811 	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4812 		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4813 	sp->stats.rx_bytes += delta;
4814 	dev->stats.rx_bytes += delta;
4815 
4816 	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4817 		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4818 	sp->stats.tx_bytes += delta;
4819 	dev->stats.tx_bytes += delta;
4820 
4821 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4822 	sp->stats.rx_errors += delta;
4823 	dev->stats.rx_errors += delta;
4824 
4825 	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4826 		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4827 	sp->stats.tx_errors += delta;
4828 	dev->stats.tx_errors += delta;
4829 
4830 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4831 	sp->stats.rx_dropped += delta;
4832 	dev->stats.rx_dropped += delta;
4833 
4834 	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4835 	sp->stats.tx_dropped += delta;
4836 	dev->stats.tx_dropped += delta;
4837 
4838 	/* The adapter MAC interprets pause frames as multicast packets, but
4839 	 * does not pass them up.  This erroneously increases the multicast
4840 	 * packet count and needs to be deducted when the multicast frame count
4841 	 * is queried.
4842 	 */
4843 	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4844 		le32_to_cpu(stats->rmac_vld_mcst_frms);
4845 	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4846 	delta -= sp->stats.multicast;
4847 	sp->stats.multicast += delta;
4848 	dev->stats.multicast += delta;
4849 
4850 	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4851 		le32_to_cpu(stats->rmac_usized_frms)) +
4852 		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4853 	sp->stats.rx_length_errors += delta;
4854 	dev->stats.rx_length_errors += delta;
4855 
4856 	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4857 	sp->stats.rx_crc_errors += delta;
4858 	dev->stats.rx_crc_errors += delta;
4859 
4860 	return &dev->stats;
4861 }
4862 
4863 /**
4864  *  s2io_set_multicast - entry point for multicast address enable/disable.
4865  *  @dev : pointer to the device structure
4866  *  Description:
4867  *  This function is a driver entry point which gets called by the kernel
4868  *  whenever multicast addresses must be enabled/disabled. This also gets
4869  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4870  *  determine, if multicast address must be enabled or if promiscuous mode
4871  *  is to be disabled etc.
4872  *  Return value:
4873  *  void.
4874  */
4875 
4876 static void s2io_set_multicast(struct net_device *dev)
4877 {
4878 	int i, j, prev_cnt;
4879 	struct netdev_hw_addr *ha;
4880 	struct s2io_nic *sp = netdev_priv(dev);
4881 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4882 	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4883 		0xfeffffffffffULL;
4884 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4885 	void __iomem *add;
4886 	struct config_param *config = &sp->config;
4887 
4888 	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4889 		/*  Enable all Multicast addresses */
4890 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4891 		       &bar0->rmac_addr_data0_mem);
4892 		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4893 		       &bar0->rmac_addr_data1_mem);
4894 		val64 = RMAC_ADDR_CMD_MEM_WE |
4895 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4896 			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4897 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4898 		/* Wait till command completes */
4899 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4900 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4901 				      S2IO_BIT_RESET);
4902 
4903 		sp->m_cast_flg = 1;
4904 		sp->all_multi_pos = config->max_mc_addr - 1;
4905 	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4906 		/*  Disable all Multicast addresses */
4907 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4908 		       &bar0->rmac_addr_data0_mem);
4909 		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4910 		       &bar0->rmac_addr_data1_mem);
4911 		val64 = RMAC_ADDR_CMD_MEM_WE |
4912 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4913 			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4914 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4915 		/* Wait till command completes */
4916 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4917 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4918 				      S2IO_BIT_RESET);
4919 
4920 		sp->m_cast_flg = 0;
4921 		sp->all_multi_pos = 0;
4922 	}
4923 
4924 	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4925 		/*  Put the NIC into promiscuous mode */
4926 		add = &bar0->mac_cfg;
4927 		val64 = readq(&bar0->mac_cfg);
4928 		val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4929 
4930 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4931 		writel((u32)val64, add);
4932 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4933 		writel((u32) (val64 >> 32), (add + 4));
4934 
4935 		if (vlan_tag_strip != 1) {
4936 			val64 = readq(&bar0->rx_pa_cfg);
4937 			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4938 			writeq(val64, &bar0->rx_pa_cfg);
4939 			sp->vlan_strip_flag = 0;
4940 		}
4941 
4942 		val64 = readq(&bar0->mac_cfg);
4943 		sp->promisc_flg = 1;
4944 		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4945 			  dev->name);
4946 	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4947 		/*  Remove the NIC from promiscuous mode */
4948 		add = &bar0->mac_cfg;
4949 		val64 = readq(&bar0->mac_cfg);
4950 		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4951 
4952 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4953 		writel((u32)val64, add);
4954 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4955 		writel((u32) (val64 >> 32), (add + 4));
4956 
4957 		if (vlan_tag_strip != 0) {
4958 			val64 = readq(&bar0->rx_pa_cfg);
4959 			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4960 			writeq(val64, &bar0->rx_pa_cfg);
4961 			sp->vlan_strip_flag = 1;
4962 		}
4963 
4964 		val64 = readq(&bar0->mac_cfg);
4965 		sp->promisc_flg = 0;
4966 		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
4967 	}
4968 
4969 	/*  Update individual M_CAST address list */
4970 	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
4971 		if (netdev_mc_count(dev) >
4972 		    (config->max_mc_addr - config->max_mac_addr)) {
4973 			DBG_PRINT(ERR_DBG,
4974 				  "%s: No more Rx filters can be added - "
4975 				  "please enable ALL_MULTI instead\n",
4976 				  dev->name);
4977 			return;
4978 		}
4979 
4980 		prev_cnt = sp->mc_addr_count;
4981 		sp->mc_addr_count = netdev_mc_count(dev);
4982 
4983 		/* Clear out the previous list of Mc in the H/W. */
4984 		for (i = 0; i < prev_cnt; i++) {
4985 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4986 			       &bar0->rmac_addr_data0_mem);
4987 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4988 			       &bar0->rmac_addr_data1_mem);
4989 			val64 = RMAC_ADDR_CMD_MEM_WE |
4990 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4991 				RMAC_ADDR_CMD_MEM_OFFSET
4992 				(config->mc_start_offset + i);
4993 			writeq(val64, &bar0->rmac_addr_cmd_mem);
4994 
4995 			/* Wait for command completes */
4996 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4997 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4998 						  S2IO_BIT_RESET)) {
4999 				DBG_PRINT(ERR_DBG,
5000 					  "%s: Adding Multicasts failed\n",
5001 					  dev->name);
5002 				return;
5003 			}
5004 		}
5005 
5006 		/* Create the new Rx filter list and update the same in H/W. */
5007 		i = 0;
5008 		netdev_for_each_mc_addr(ha, dev) {
5009 			mac_addr = 0;
5010 			for (j = 0; j < ETH_ALEN; j++) {
5011 				mac_addr |= ha->addr[j];
5012 				mac_addr <<= 8;
5013 			}
5014 			mac_addr >>= 8;
5015 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5016 			       &bar0->rmac_addr_data0_mem);
5017 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5018 			       &bar0->rmac_addr_data1_mem);
5019 			val64 = RMAC_ADDR_CMD_MEM_WE |
5020 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5021 				RMAC_ADDR_CMD_MEM_OFFSET
5022 				(i + config->mc_start_offset);
5023 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5024 
5025 			/* Wait for command completes */
5026 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5027 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5028 						  S2IO_BIT_RESET)) {
5029 				DBG_PRINT(ERR_DBG,
5030 					  "%s: Adding Multicasts failed\n",
5031 					  dev->name);
5032 				return;
5033 			}
5034 			i++;
5035 		}
5036 	}
5037 }
5038 
5039 /* read from CAM unicast & multicast addresses and store it in
5040  * def_mac_addr structure
5041  */
5042 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5043 {
5044 	int offset;
5045 	u64 mac_addr = 0x0;
5046 	struct config_param *config = &sp->config;
5047 
5048 	/* store unicast & multicast mac addresses */
5049 	for (offset = 0; offset < config->max_mc_addr; offset++) {
5050 		mac_addr = do_s2io_read_unicast_mc(sp, offset);
5051 		/* if read fails disable the entry */
5052 		if (mac_addr == FAILURE)
5053 			mac_addr = S2IO_DISABLE_MAC_ENTRY;
5054 		do_s2io_copy_mac_addr(sp, offset, mac_addr);
5055 	}
5056 }
5057 
5058 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5059 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5060 {
5061 	int offset;
5062 	struct config_param *config = &sp->config;
5063 	/* restore unicast mac address */
5064 	for (offset = 0; offset < config->max_mac_addr; offset++)
5065 		do_s2io_prog_unicast(sp->dev,
5066 				     sp->def_mac_addr[offset].mac_addr);
5067 
5068 	/* restore multicast mac address */
5069 	for (offset = config->mc_start_offset;
5070 	     offset < config->max_mc_addr; offset++)
5071 		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5072 }
5073 
5074 /* add a multicast MAC address to CAM */
5075 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5076 {
5077 	int i;
5078 	u64 mac_addr = 0;
5079 	struct config_param *config = &sp->config;
5080 
5081 	for (i = 0; i < ETH_ALEN; i++) {
5082 		mac_addr <<= 8;
5083 		mac_addr |= addr[i];
5084 	}
5085 	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5086 		return SUCCESS;
5087 
5088 	/* check if the multicast mac already preset in CAM */
5089 	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5090 		u64 tmp64;
5091 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5092 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5093 			break;
5094 
5095 		if (tmp64 == mac_addr)
5096 			return SUCCESS;
5097 	}
5098 	if (i == config->max_mc_addr) {
5099 		DBG_PRINT(ERR_DBG,
5100 			  "CAM full no space left for multicast MAC\n");
5101 		return FAILURE;
5102 	}
5103 	/* Update the internal structure with this new mac address */
5104 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5105 
5106 	return do_s2io_add_mac(sp, mac_addr, i);
5107 }
5108 
5109 /* add MAC address to CAM */
5110 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5111 {
5112 	u64 val64;
5113 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5114 
5115 	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5116 	       &bar0->rmac_addr_data0_mem);
5117 
5118 	val64 =	RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5119 		RMAC_ADDR_CMD_MEM_OFFSET(off);
5120 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5121 
5122 	/* Wait till command completes */
5123 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5124 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5125 				  S2IO_BIT_RESET)) {
5126 		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5127 		return FAILURE;
5128 	}
5129 	return SUCCESS;
5130 }
5131 /* deletes a specified unicast/multicast mac entry from CAM */
5132 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5133 {
5134 	int offset;
5135 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5136 	struct config_param *config = &sp->config;
5137 
5138 	for (offset = 1;
5139 	     offset < config->max_mc_addr; offset++) {
5140 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
5141 		if (tmp64 == addr) {
5142 			/* disable the entry by writing  0xffffffffffffULL */
5143 			if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5144 				return FAILURE;
5145 			/* store the new mac list from CAM */
5146 			do_s2io_store_unicast_mc(sp);
5147 			return SUCCESS;
5148 		}
5149 	}
5150 	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5151 		  (unsigned long long)addr);
5152 	return FAILURE;
5153 }
5154 
5155 /* read mac entries from CAM */
5156 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5157 {
5158 	u64 tmp64 = 0xffffffffffff0000ULL, val64;
5159 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5160 
5161 	/* read mac addr */
5162 	val64 =	RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5163 		RMAC_ADDR_CMD_MEM_OFFSET(offset);
5164 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5165 
5166 	/* Wait till command completes */
5167 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5168 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5169 				  S2IO_BIT_RESET)) {
5170 		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5171 		return FAILURE;
5172 	}
5173 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
5174 
5175 	return tmp64 >> 16;
5176 }
5177 
5178 /**
5179  * s2io_set_mac_addr - driver entry point
5180  */
5181 
5182 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5183 {
5184 	struct sockaddr *addr = p;
5185 
5186 	if (!is_valid_ether_addr(addr->sa_data))
5187 		return -EADDRNOTAVAIL;
5188 
5189 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5190 
5191 	/* store the MAC address in CAM */
5192 	return do_s2io_prog_unicast(dev, dev->dev_addr);
5193 }
5194 /**
5195  *  do_s2io_prog_unicast - Programs the Xframe mac address
5196  *  @dev : pointer to the device structure.
5197  *  @addr: a uchar pointer to the new mac address which is to be set.
5198  *  Description : This procedure will program the Xframe to receive
5199  *  frames with new Mac Address
5200  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5201  *  as defined in errno.h file on failure.
5202  */
5203 
5204 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5205 {
5206 	struct s2io_nic *sp = netdev_priv(dev);
5207 	register u64 mac_addr = 0, perm_addr = 0;
5208 	int i;
5209 	u64 tmp64;
5210 	struct config_param *config = &sp->config;
5211 
5212 	/*
5213 	 * Set the new MAC address as the new unicast filter and reflect this
5214 	 * change on the device address registered with the OS. It will be
5215 	 * at offset 0.
5216 	 */
5217 	for (i = 0; i < ETH_ALEN; i++) {
5218 		mac_addr <<= 8;
5219 		mac_addr |= addr[i];
5220 		perm_addr <<= 8;
5221 		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5222 	}
5223 
5224 	/* check if the dev_addr is different than perm_addr */
5225 	if (mac_addr == perm_addr)
5226 		return SUCCESS;
5227 
5228 	/* check if the mac already preset in CAM */
5229 	for (i = 1; i < config->max_mac_addr; i++) {
5230 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5231 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5232 			break;
5233 
5234 		if (tmp64 == mac_addr) {
5235 			DBG_PRINT(INFO_DBG,
5236 				  "MAC addr:0x%llx already present in CAM\n",
5237 				  (unsigned long long)mac_addr);
5238 			return SUCCESS;
5239 		}
5240 	}
5241 	if (i == config->max_mac_addr) {
5242 		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5243 		return FAILURE;
5244 	}
5245 	/* Update the internal structure with this new mac address */
5246 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5247 
5248 	return do_s2io_add_mac(sp, mac_addr, i);
5249 }
5250 
5251 /**
5252  * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5253  * @sp : private member of the device structure, which is a pointer to the
5254  * s2io_nic structure.
5255  * @cmd: pointer to the structure with parameters given by ethtool to set
5256  * link information.
5257  * Description:
5258  * The function sets different link parameters provided by the user onto
5259  * the NIC.
5260  * Return value:
5261  * 0 on success.
5262  */
5263 
5264 static int
5265 s2io_ethtool_set_link_ksettings(struct net_device *dev,
5266 				const struct ethtool_link_ksettings *cmd)
5267 {
5268 	struct s2io_nic *sp = netdev_priv(dev);
5269 	if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5270 	    (cmd->base.speed != SPEED_10000) ||
5271 	    (cmd->base.duplex != DUPLEX_FULL))
5272 		return -EINVAL;
5273 	else {
5274 		s2io_close(sp->dev);
5275 		s2io_open(sp->dev);
5276 	}
5277 
5278 	return 0;
5279 }
5280 
5281 /**
5282  * s2io_ethtol_get_link_ksettings - Return link specific information.
5283  * @sp : private member of the device structure, pointer to the
5284  *      s2io_nic structure.
5285  * @cmd : pointer to the structure with parameters given by ethtool
5286  * to return link information.
5287  * Description:
5288  * Returns link specific information like speed, duplex etc.. to ethtool.
5289  * Return value :
5290  * return 0 on success.
5291  */
5292 
5293 static int
5294 s2io_ethtool_get_link_ksettings(struct net_device *dev,
5295 				struct ethtool_link_ksettings *cmd)
5296 {
5297 	struct s2io_nic *sp = netdev_priv(dev);
5298 
5299 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
5300 	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5301 	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5302 
5303 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5304 	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5305 	ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5306 
5307 	cmd->base.port = PORT_FIBRE;
5308 
5309 	if (netif_carrier_ok(sp->dev)) {
5310 		cmd->base.speed = SPEED_10000;
5311 		cmd->base.duplex = DUPLEX_FULL;
5312 	} else {
5313 		cmd->base.speed = SPEED_UNKNOWN;
5314 		cmd->base.duplex = DUPLEX_UNKNOWN;
5315 	}
5316 
5317 	cmd->base.autoneg = AUTONEG_DISABLE;
5318 	return 0;
5319 }
5320 
5321 /**
5322  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5323  * @sp : private member of the device structure, which is a pointer to the
5324  * s2io_nic structure.
5325  * @info : pointer to the structure with parameters given by ethtool to
5326  * return driver information.
5327  * Description:
5328  * Returns driver specefic information like name, version etc.. to ethtool.
5329  * Return value:
5330  *  void
5331  */
5332 
5333 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5334 				  struct ethtool_drvinfo *info)
5335 {
5336 	struct s2io_nic *sp = netdev_priv(dev);
5337 
5338 	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5339 	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
5340 	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5341 }
5342 
5343 /**
5344  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5345  *  @sp: private member of the device structure, which is a pointer to the
5346  *  s2io_nic structure.
5347  *  @regs : pointer to the structure with parameters given by ethtool for
5348  *  dumping the registers.
5349  *  @reg_space: The input argument into which all the registers are dumped.
5350  *  Description:
5351  *  Dumps the entire register space of xFrame NIC into the user given
5352  *  buffer area.
5353  * Return value :
5354  * void .
5355  */
5356 
5357 static void s2io_ethtool_gregs(struct net_device *dev,
5358 			       struct ethtool_regs *regs, void *space)
5359 {
5360 	int i;
5361 	u64 reg;
5362 	u8 *reg_space = (u8 *)space;
5363 	struct s2io_nic *sp = netdev_priv(dev);
5364 
5365 	regs->len = XENA_REG_SPACE;
5366 	regs->version = sp->pdev->subsystem_device;
5367 
5368 	for (i = 0; i < regs->len; i += 8) {
5369 		reg = readq(sp->bar0 + i);
5370 		memcpy((reg_space + i), &reg, 8);
5371 	}
5372 }
5373 
5374 /*
5375  *  s2io_set_led - control NIC led
5376  */
5377 static void s2io_set_led(struct s2io_nic *sp, bool on)
5378 {
5379 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5380 	u16 subid = sp->pdev->subsystem_device;
5381 	u64 val64;
5382 
5383 	if ((sp->device_type == XFRAME_II_DEVICE) ||
5384 	    ((subid & 0xFF) >= 0x07)) {
5385 		val64 = readq(&bar0->gpio_control);
5386 		if (on)
5387 			val64 |= GPIO_CTRL_GPIO_0;
5388 		else
5389 			val64 &= ~GPIO_CTRL_GPIO_0;
5390 
5391 		writeq(val64, &bar0->gpio_control);
5392 	} else {
5393 		val64 = readq(&bar0->adapter_control);
5394 		if (on)
5395 			val64 |= ADAPTER_LED_ON;
5396 		else
5397 			val64 &= ~ADAPTER_LED_ON;
5398 
5399 		writeq(val64, &bar0->adapter_control);
5400 	}
5401 
5402 }
5403 
5404 /**
5405  * s2io_ethtool_set_led - To physically identify the nic on the system.
5406  * @dev : network device
5407  * @state: led setting
5408  *
5409  * Description: Used to physically identify the NIC on the system.
5410  * The Link LED will blink for a time specified by the user for
5411  * identification.
5412  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5413  * identification is possible only if it's link is up.
5414  */
5415 
5416 static int s2io_ethtool_set_led(struct net_device *dev,
5417 				enum ethtool_phys_id_state state)
5418 {
5419 	struct s2io_nic *sp = netdev_priv(dev);
5420 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5421 	u16 subid = sp->pdev->subsystem_device;
5422 
5423 	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5424 		u64 val64 = readq(&bar0->adapter_control);
5425 		if (!(val64 & ADAPTER_CNTL_EN)) {
5426 			pr_err("Adapter Link down, cannot blink LED\n");
5427 			return -EAGAIN;
5428 		}
5429 	}
5430 
5431 	switch (state) {
5432 	case ETHTOOL_ID_ACTIVE:
5433 		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5434 		return 1;	/* cycle on/off once per second */
5435 
5436 	case ETHTOOL_ID_ON:
5437 		s2io_set_led(sp, true);
5438 		break;
5439 
5440 	case ETHTOOL_ID_OFF:
5441 		s2io_set_led(sp, false);
5442 		break;
5443 
5444 	case ETHTOOL_ID_INACTIVE:
5445 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5446 			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5447 	}
5448 
5449 	return 0;
5450 }
5451 
5452 static void s2io_ethtool_gringparam(struct net_device *dev,
5453 				    struct ethtool_ringparam *ering)
5454 {
5455 	struct s2io_nic *sp = netdev_priv(dev);
5456 	int i, tx_desc_count = 0, rx_desc_count = 0;
5457 
5458 	if (sp->rxd_mode == RXD_MODE_1) {
5459 		ering->rx_max_pending = MAX_RX_DESC_1;
5460 		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5461 	} else {
5462 		ering->rx_max_pending = MAX_RX_DESC_2;
5463 		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5464 	}
5465 
5466 	ering->tx_max_pending = MAX_TX_DESC;
5467 
5468 	for (i = 0; i < sp->config.rx_ring_num; i++)
5469 		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5470 	ering->rx_pending = rx_desc_count;
5471 	ering->rx_jumbo_pending = rx_desc_count;
5472 
5473 	for (i = 0; i < sp->config.tx_fifo_num; i++)
5474 		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5475 	ering->tx_pending = tx_desc_count;
5476 	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5477 }
5478 
5479 /**
5480  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5481  * @sp : private member of the device structure, which is a pointer to the
5482  *	s2io_nic structure.
5483  * @ep : pointer to the structure with pause parameters given by ethtool.
5484  * Description:
5485  * Returns the Pause frame generation and reception capability of the NIC.
5486  * Return value:
5487  *  void
5488  */
5489 static void s2io_ethtool_getpause_data(struct net_device *dev,
5490 				       struct ethtool_pauseparam *ep)
5491 {
5492 	u64 val64;
5493 	struct s2io_nic *sp = netdev_priv(dev);
5494 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5495 
5496 	val64 = readq(&bar0->rmac_pause_cfg);
5497 	if (val64 & RMAC_PAUSE_GEN_ENABLE)
5498 		ep->tx_pause = true;
5499 	if (val64 & RMAC_PAUSE_RX_ENABLE)
5500 		ep->rx_pause = true;
5501 	ep->autoneg = false;
5502 }
5503 
5504 /**
5505  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5506  * @sp : private member of the device structure, which is a pointer to the
5507  *      s2io_nic structure.
5508  * @ep : pointer to the structure with pause parameters given by ethtool.
5509  * Description:
5510  * It can be used to set or reset Pause frame generation or reception
5511  * support of the NIC.
5512  * Return value:
5513  * int, returns 0 on Success
5514  */
5515 
5516 static int s2io_ethtool_setpause_data(struct net_device *dev,
5517 				      struct ethtool_pauseparam *ep)
5518 {
5519 	u64 val64;
5520 	struct s2io_nic *sp = netdev_priv(dev);
5521 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5522 
5523 	val64 = readq(&bar0->rmac_pause_cfg);
5524 	if (ep->tx_pause)
5525 		val64 |= RMAC_PAUSE_GEN_ENABLE;
5526 	else
5527 		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5528 	if (ep->rx_pause)
5529 		val64 |= RMAC_PAUSE_RX_ENABLE;
5530 	else
5531 		val64 &= ~RMAC_PAUSE_RX_ENABLE;
5532 	writeq(val64, &bar0->rmac_pause_cfg);
5533 	return 0;
5534 }
5535 
5536 /**
5537  * read_eeprom - reads 4 bytes of data from user given offset.
5538  * @sp : private member of the device structure, which is a pointer to the
5539  *      s2io_nic structure.
5540  * @off : offset at which the data must be written
5541  * @data : Its an output parameter where the data read at the given
5542  *	offset is stored.
5543  * Description:
5544  * Will read 4 bytes of data from the user given offset and return the
5545  * read data.
5546  * NOTE: Will allow to read only part of the EEPROM visible through the
5547  *   I2C bus.
5548  * Return value:
5549  *  -1 on failure and 0 on success.
5550  */
5551 
5552 #define S2IO_DEV_ID		5
5553 static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5554 {
5555 	int ret = -1;
5556 	u32 exit_cnt = 0;
5557 	u64 val64;
5558 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5559 
5560 	if (sp->device_type == XFRAME_I_DEVICE) {
5561 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5562 			I2C_CONTROL_ADDR(off) |
5563 			I2C_CONTROL_BYTE_CNT(0x3) |
5564 			I2C_CONTROL_READ |
5565 			I2C_CONTROL_CNTL_START;
5566 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5567 
5568 		while (exit_cnt < 5) {
5569 			val64 = readq(&bar0->i2c_control);
5570 			if (I2C_CONTROL_CNTL_END(val64)) {
5571 				*data = I2C_CONTROL_GET_DATA(val64);
5572 				ret = 0;
5573 				break;
5574 			}
5575 			msleep(50);
5576 			exit_cnt++;
5577 		}
5578 	}
5579 
5580 	if (sp->device_type == XFRAME_II_DEVICE) {
5581 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5582 			SPI_CONTROL_BYTECNT(0x3) |
5583 			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5584 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5585 		val64 |= SPI_CONTROL_REQ;
5586 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5587 		while (exit_cnt < 5) {
5588 			val64 = readq(&bar0->spi_control);
5589 			if (val64 & SPI_CONTROL_NACK) {
5590 				ret = 1;
5591 				break;
5592 			} else if (val64 & SPI_CONTROL_DONE) {
5593 				*data = readq(&bar0->spi_data);
5594 				*data &= 0xffffff;
5595 				ret = 0;
5596 				break;
5597 			}
5598 			msleep(50);
5599 			exit_cnt++;
5600 		}
5601 	}
5602 	return ret;
5603 }
5604 
5605 /**
5606  *  write_eeprom - actually writes the relevant part of the data value.
5607  *  @sp : private member of the device structure, which is a pointer to the
5608  *       s2io_nic structure.
5609  *  @off : offset at which the data must be written
5610  *  @data : The data that is to be written
5611  *  @cnt : Number of bytes of the data that are actually to be written into
5612  *  the Eeprom. (max of 3)
5613  * Description:
5614  *  Actually writes the relevant part of the data value into the Eeprom
5615  *  through the I2C bus.
5616  * Return value:
5617  *  0 on success, -1 on failure.
5618  */
5619 
5620 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5621 {
5622 	int exit_cnt = 0, ret = -1;
5623 	u64 val64;
5624 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5625 
5626 	if (sp->device_type == XFRAME_I_DEVICE) {
5627 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5628 			I2C_CONTROL_ADDR(off) |
5629 			I2C_CONTROL_BYTE_CNT(cnt) |
5630 			I2C_CONTROL_SET_DATA((u32)data) |
5631 			I2C_CONTROL_CNTL_START;
5632 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5633 
5634 		while (exit_cnt < 5) {
5635 			val64 = readq(&bar0->i2c_control);
5636 			if (I2C_CONTROL_CNTL_END(val64)) {
5637 				if (!(val64 & I2C_CONTROL_NACK))
5638 					ret = 0;
5639 				break;
5640 			}
5641 			msleep(50);
5642 			exit_cnt++;
5643 		}
5644 	}
5645 
5646 	if (sp->device_type == XFRAME_II_DEVICE) {
5647 		int write_cnt = (cnt == 8) ? 0 : cnt;
5648 		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5649 
5650 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5651 			SPI_CONTROL_BYTECNT(write_cnt) |
5652 			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5653 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5654 		val64 |= SPI_CONTROL_REQ;
5655 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5656 		while (exit_cnt < 5) {
5657 			val64 = readq(&bar0->spi_control);
5658 			if (val64 & SPI_CONTROL_NACK) {
5659 				ret = 1;
5660 				break;
5661 			} else if (val64 & SPI_CONTROL_DONE) {
5662 				ret = 0;
5663 				break;
5664 			}
5665 			msleep(50);
5666 			exit_cnt++;
5667 		}
5668 	}
5669 	return ret;
5670 }
5671 static void s2io_vpd_read(struct s2io_nic *nic)
5672 {
5673 	u8 *vpd_data;
5674 	u8 data;
5675 	int i = 0, cnt, len, fail = 0;
5676 	int vpd_addr = 0x80;
5677 	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5678 
5679 	if (nic->device_type == XFRAME_II_DEVICE) {
5680 		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5681 		vpd_addr = 0x80;
5682 	} else {
5683 		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5684 		vpd_addr = 0x50;
5685 	}
5686 	strcpy(nic->serial_num, "NOT AVAILABLE");
5687 
5688 	vpd_data = kmalloc(256, GFP_KERNEL);
5689 	if (!vpd_data) {
5690 		swstats->mem_alloc_fail_cnt++;
5691 		return;
5692 	}
5693 	swstats->mem_allocated += 256;
5694 
5695 	for (i = 0; i < 256; i += 4) {
5696 		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5697 		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5698 		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5699 		for (cnt = 0; cnt < 5; cnt++) {
5700 			msleep(2);
5701 			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5702 			if (data == 0x80)
5703 				break;
5704 		}
5705 		if (cnt >= 5) {
5706 			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5707 			fail = 1;
5708 			break;
5709 		}
5710 		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5711 				      (u32 *)&vpd_data[i]);
5712 	}
5713 
5714 	if (!fail) {
5715 		/* read serial number of adapter */
5716 		for (cnt = 0; cnt < 252; cnt++) {
5717 			if ((vpd_data[cnt] == 'S') &&
5718 			    (vpd_data[cnt+1] == 'N')) {
5719 				len = vpd_data[cnt+2];
5720 				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5721 					memcpy(nic->serial_num,
5722 					       &vpd_data[cnt + 3],
5723 					       len);
5724 					memset(nic->serial_num+len,
5725 					       0,
5726 					       VPD_STRING_LEN-len);
5727 					break;
5728 				}
5729 			}
5730 		}
5731 	}
5732 
5733 	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5734 		len = vpd_data[1];
5735 		memcpy(nic->product_name, &vpd_data[3], len);
5736 		nic->product_name[len] = 0;
5737 	}
5738 	kfree(vpd_data);
5739 	swstats->mem_freed += 256;
5740 }
5741 
5742 /**
5743  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5744  *  @sp : private member of the device structure, which is a pointer to the
5745  *  s2io_nic structure.
5746  *  @eeprom : pointer to the user level structure provided by ethtool,
5747  *  containing all relevant information.
5748  *  @data_buf : user defined value to be written into Eeprom.
5749  *  Description: Reads the values stored in the Eeprom at given offset
5750  *  for a given length. Stores these values int the input argument data
5751  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5752  *  Return value:
5753  *  int  0 on success
5754  */
5755 
5756 static int s2io_ethtool_geeprom(struct net_device *dev,
5757 				struct ethtool_eeprom *eeprom, u8 * data_buf)
5758 {
5759 	u32 i, valid;
5760 	u64 data;
5761 	struct s2io_nic *sp = netdev_priv(dev);
5762 
5763 	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5764 
5765 	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5766 		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5767 
5768 	for (i = 0; i < eeprom->len; i += 4) {
5769 		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5770 			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5771 			return -EFAULT;
5772 		}
5773 		valid = INV(data);
5774 		memcpy((data_buf + i), &valid, 4);
5775 	}
5776 	return 0;
5777 }
5778 
5779 /**
5780  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5781  *  @sp : private member of the device structure, which is a pointer to the
5782  *  s2io_nic structure.
5783  *  @eeprom : pointer to the user level structure provided by ethtool,
5784  *  containing all relevant information.
5785  *  @data_buf ; user defined value to be written into Eeprom.
5786  *  Description:
5787  *  Tries to write the user provided value in the Eeprom, at the offset
5788  *  given by the user.
5789  *  Return value:
5790  *  0 on success, -EFAULT on failure.
5791  */
5792 
5793 static int s2io_ethtool_seeprom(struct net_device *dev,
5794 				struct ethtool_eeprom *eeprom,
5795 				u8 *data_buf)
5796 {
5797 	int len = eeprom->len, cnt = 0;
5798 	u64 valid = 0, data;
5799 	struct s2io_nic *sp = netdev_priv(dev);
5800 
5801 	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5802 		DBG_PRINT(ERR_DBG,
5803 			  "ETHTOOL_WRITE_EEPROM Err: "
5804 			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
5805 			  (sp->pdev->vendor | (sp->pdev->device << 16)),
5806 			  eeprom->magic);
5807 		return -EFAULT;
5808 	}
5809 
5810 	while (len) {
5811 		data = (u32)data_buf[cnt] & 0x000000FF;
5812 		if (data)
5813 			valid = (u32)(data << 24);
5814 		else
5815 			valid = data;
5816 
5817 		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5818 			DBG_PRINT(ERR_DBG,
5819 				  "ETHTOOL_WRITE_EEPROM Err: "
5820 				  "Cannot write into the specified offset\n");
5821 			return -EFAULT;
5822 		}
5823 		cnt++;
5824 		len--;
5825 	}
5826 
5827 	return 0;
5828 }
5829 
5830 /**
5831  * s2io_register_test - reads and writes into all clock domains.
5832  * @sp : private member of the device structure, which is a pointer to the
5833  * s2io_nic structure.
5834  * @data : variable that returns the result of each of the test conducted b
5835  * by the driver.
5836  * Description:
5837  * Read and write into all clock domains. The NIC has 3 clock domains,
5838  * see that registers in all the three regions are accessible.
5839  * Return value:
5840  * 0 on success.
5841  */
5842 
5843 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5844 {
5845 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5846 	u64 val64 = 0, exp_val;
5847 	int fail = 0;
5848 
5849 	val64 = readq(&bar0->pif_rd_swapper_fb);
5850 	if (val64 != 0x123456789abcdefULL) {
5851 		fail = 1;
5852 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5853 	}
5854 
5855 	val64 = readq(&bar0->rmac_pause_cfg);
5856 	if (val64 != 0xc000ffff00000000ULL) {
5857 		fail = 1;
5858 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5859 	}
5860 
5861 	val64 = readq(&bar0->rx_queue_cfg);
5862 	if (sp->device_type == XFRAME_II_DEVICE)
5863 		exp_val = 0x0404040404040404ULL;
5864 	else
5865 		exp_val = 0x0808080808080808ULL;
5866 	if (val64 != exp_val) {
5867 		fail = 1;
5868 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5869 	}
5870 
5871 	val64 = readq(&bar0->xgxs_efifo_cfg);
5872 	if (val64 != 0x000000001923141EULL) {
5873 		fail = 1;
5874 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5875 	}
5876 
5877 	val64 = 0x5A5A5A5A5A5A5A5AULL;
5878 	writeq(val64, &bar0->xmsi_data);
5879 	val64 = readq(&bar0->xmsi_data);
5880 	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5881 		fail = 1;
5882 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5883 	}
5884 
5885 	val64 = 0xA5A5A5A5A5A5A5A5ULL;
5886 	writeq(val64, &bar0->xmsi_data);
5887 	val64 = readq(&bar0->xmsi_data);
5888 	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5889 		fail = 1;
5890 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5891 	}
5892 
5893 	*data = fail;
5894 	return fail;
5895 }
5896 
5897 /**
5898  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5899  * @sp : private member of the device structure, which is a pointer to the
5900  * s2io_nic structure.
5901  * @data:variable that returns the result of each of the test conducted by
5902  * the driver.
5903  * Description:
5904  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5905  * register.
5906  * Return value:
5907  * 0 on success.
5908  */
5909 
5910 static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5911 {
5912 	int fail = 0;
5913 	u64 ret_data, org_4F0, org_7F0;
5914 	u8 saved_4F0 = 0, saved_7F0 = 0;
5915 	struct net_device *dev = sp->dev;
5916 
5917 	/* Test Write Error at offset 0 */
5918 	/* Note that SPI interface allows write access to all areas
5919 	 * of EEPROM. Hence doing all negative testing only for Xframe I.
5920 	 */
5921 	if (sp->device_type == XFRAME_I_DEVICE)
5922 		if (!write_eeprom(sp, 0, 0, 3))
5923 			fail = 1;
5924 
5925 	/* Save current values at offsets 0x4F0 and 0x7F0 */
5926 	if (!read_eeprom(sp, 0x4F0, &org_4F0))
5927 		saved_4F0 = 1;
5928 	if (!read_eeprom(sp, 0x7F0, &org_7F0))
5929 		saved_7F0 = 1;
5930 
5931 	/* Test Write at offset 4f0 */
5932 	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5933 		fail = 1;
5934 	if (read_eeprom(sp, 0x4F0, &ret_data))
5935 		fail = 1;
5936 
5937 	if (ret_data != 0x012345) {
5938 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5939 			  "Data written %llx Data read %llx\n",
5940 			  dev->name, (unsigned long long)0x12345,
5941 			  (unsigned long long)ret_data);
5942 		fail = 1;
5943 	}
5944 
5945 	/* Reset the EEPROM data go FFFF */
5946 	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5947 
5948 	/* Test Write Request Error at offset 0x7c */
5949 	if (sp->device_type == XFRAME_I_DEVICE)
5950 		if (!write_eeprom(sp, 0x07C, 0, 3))
5951 			fail = 1;
5952 
5953 	/* Test Write Request at offset 0x7f0 */
5954 	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5955 		fail = 1;
5956 	if (read_eeprom(sp, 0x7F0, &ret_data))
5957 		fail = 1;
5958 
5959 	if (ret_data != 0x012345) {
5960 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5961 			  "Data written %llx Data read %llx\n",
5962 			  dev->name, (unsigned long long)0x12345,
5963 			  (unsigned long long)ret_data);
5964 		fail = 1;
5965 	}
5966 
5967 	/* Reset the EEPROM data go FFFF */
5968 	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5969 
5970 	if (sp->device_type == XFRAME_I_DEVICE) {
5971 		/* Test Write Error at offset 0x80 */
5972 		if (!write_eeprom(sp, 0x080, 0, 3))
5973 			fail = 1;
5974 
5975 		/* Test Write Error at offset 0xfc */
5976 		if (!write_eeprom(sp, 0x0FC, 0, 3))
5977 			fail = 1;
5978 
5979 		/* Test Write Error at offset 0x100 */
5980 		if (!write_eeprom(sp, 0x100, 0, 3))
5981 			fail = 1;
5982 
5983 		/* Test Write Error at offset 4ec */
5984 		if (!write_eeprom(sp, 0x4EC, 0, 3))
5985 			fail = 1;
5986 	}
5987 
5988 	/* Restore values at offsets 0x4F0 and 0x7F0 */
5989 	if (saved_4F0)
5990 		write_eeprom(sp, 0x4F0, org_4F0, 3);
5991 	if (saved_7F0)
5992 		write_eeprom(sp, 0x7F0, org_7F0, 3);
5993 
5994 	*data = fail;
5995 	return fail;
5996 }
5997 
5998 /**
5999  * s2io_bist_test - invokes the MemBist test of the card .
6000  * @sp : private member of the device structure, which is a pointer to the
6001  * s2io_nic structure.
6002  * @data:variable that returns the result of each of the test conducted by
6003  * the driver.
6004  * Description:
6005  * This invokes the MemBist test of the card. We give around
6006  * 2 secs time for the Test to complete. If it's still not complete
6007  * within this peiod, we consider that the test failed.
6008  * Return value:
6009  * 0 on success and -1 on failure.
6010  */
6011 
6012 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6013 {
6014 	u8 bist = 0;
6015 	int cnt = 0, ret = -1;
6016 
6017 	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6018 	bist |= PCI_BIST_START;
6019 	pci_write_config_word(sp->pdev, PCI_BIST, bist);
6020 
6021 	while (cnt < 20) {
6022 		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6023 		if (!(bist & PCI_BIST_START)) {
6024 			*data = (bist & PCI_BIST_CODE_MASK);
6025 			ret = 0;
6026 			break;
6027 		}
6028 		msleep(100);
6029 		cnt++;
6030 	}
6031 
6032 	return ret;
6033 }
6034 
6035 /**
6036  * s2io_link_test - verifies the link state of the nic
6037  * @sp ; private member of the device structure, which is a pointer to the
6038  * s2io_nic structure.
6039  * @data: variable that returns the result of each of the test conducted by
6040  * the driver.
6041  * Description:
6042  * The function verifies the link state of the NIC and updates the input
6043  * argument 'data' appropriately.
6044  * Return value:
6045  * 0 on success.
6046  */
6047 
6048 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6049 {
6050 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6051 	u64 val64;
6052 
6053 	val64 = readq(&bar0->adapter_status);
6054 	if (!(LINK_IS_UP(val64)))
6055 		*data = 1;
6056 	else
6057 		*data = 0;
6058 
6059 	return *data;
6060 }
6061 
6062 /**
6063  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6064  * @sp: private member of the device structure, which is a pointer to the
6065  * s2io_nic structure.
6066  * @data: variable that returns the result of each of the test
6067  * conducted by the driver.
6068  * Description:
6069  *  This is one of the offline test that tests the read and write
6070  *  access to the RldRam chip on the NIC.
6071  * Return value:
6072  *  0 on success.
6073  */
6074 
6075 static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6076 {
6077 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6078 	u64 val64;
6079 	int cnt, iteration = 0, test_fail = 0;
6080 
6081 	val64 = readq(&bar0->adapter_control);
6082 	val64 &= ~ADAPTER_ECC_EN;
6083 	writeq(val64, &bar0->adapter_control);
6084 
6085 	val64 = readq(&bar0->mc_rldram_test_ctrl);
6086 	val64 |= MC_RLDRAM_TEST_MODE;
6087 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6088 
6089 	val64 = readq(&bar0->mc_rldram_mrs);
6090 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6091 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6092 
6093 	val64 |= MC_RLDRAM_MRS_ENABLE;
6094 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6095 
6096 	while (iteration < 2) {
6097 		val64 = 0x55555555aaaa0000ULL;
6098 		if (iteration == 1)
6099 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6100 		writeq(val64, &bar0->mc_rldram_test_d0);
6101 
6102 		val64 = 0xaaaa5a5555550000ULL;
6103 		if (iteration == 1)
6104 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6105 		writeq(val64, &bar0->mc_rldram_test_d1);
6106 
6107 		val64 = 0x55aaaaaaaa5a0000ULL;
6108 		if (iteration == 1)
6109 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6110 		writeq(val64, &bar0->mc_rldram_test_d2);
6111 
6112 		val64 = (u64) (0x0000003ffffe0100ULL);
6113 		writeq(val64, &bar0->mc_rldram_test_add);
6114 
6115 		val64 = MC_RLDRAM_TEST_MODE |
6116 			MC_RLDRAM_TEST_WRITE |
6117 			MC_RLDRAM_TEST_GO;
6118 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6119 
6120 		for (cnt = 0; cnt < 5; cnt++) {
6121 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6122 			if (val64 & MC_RLDRAM_TEST_DONE)
6123 				break;
6124 			msleep(200);
6125 		}
6126 
6127 		if (cnt == 5)
6128 			break;
6129 
6130 		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6131 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6132 
6133 		for (cnt = 0; cnt < 5; cnt++) {
6134 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6135 			if (val64 & MC_RLDRAM_TEST_DONE)
6136 				break;
6137 			msleep(500);
6138 		}
6139 
6140 		if (cnt == 5)
6141 			break;
6142 
6143 		val64 = readq(&bar0->mc_rldram_test_ctrl);
6144 		if (!(val64 & MC_RLDRAM_TEST_PASS))
6145 			test_fail = 1;
6146 
6147 		iteration++;
6148 	}
6149 
6150 	*data = test_fail;
6151 
6152 	/* Bring the adapter out of test mode */
6153 	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6154 
6155 	return test_fail;
6156 }
6157 
6158 /**
6159  *  s2io_ethtool_test - conducts 6 tsets to determine the health of card.
6160  *  @sp : private member of the device structure, which is a pointer to the
6161  *  s2io_nic structure.
6162  *  @ethtest : pointer to a ethtool command specific structure that will be
6163  *  returned to the user.
6164  *  @data : variable that returns the result of each of the test
6165  * conducted by the driver.
6166  * Description:
6167  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6168  *  the health of the card.
6169  * Return value:
6170  *  void
6171  */
6172 
6173 static void s2io_ethtool_test(struct net_device *dev,
6174 			      struct ethtool_test *ethtest,
6175 			      uint64_t *data)
6176 {
6177 	struct s2io_nic *sp = netdev_priv(dev);
6178 	int orig_state = netif_running(sp->dev);
6179 
6180 	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6181 		/* Offline Tests. */
6182 		if (orig_state)
6183 			s2io_close(sp->dev);
6184 
6185 		if (s2io_register_test(sp, &data[0]))
6186 			ethtest->flags |= ETH_TEST_FL_FAILED;
6187 
6188 		s2io_reset(sp);
6189 
6190 		if (s2io_rldram_test(sp, &data[3]))
6191 			ethtest->flags |= ETH_TEST_FL_FAILED;
6192 
6193 		s2io_reset(sp);
6194 
6195 		if (s2io_eeprom_test(sp, &data[1]))
6196 			ethtest->flags |= ETH_TEST_FL_FAILED;
6197 
6198 		if (s2io_bist_test(sp, &data[4]))
6199 			ethtest->flags |= ETH_TEST_FL_FAILED;
6200 
6201 		if (orig_state)
6202 			s2io_open(sp->dev);
6203 
6204 		data[2] = 0;
6205 	} else {
6206 		/* Online Tests. */
6207 		if (!orig_state) {
6208 			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6209 				  dev->name);
6210 			data[0] = -1;
6211 			data[1] = -1;
6212 			data[2] = -1;
6213 			data[3] = -1;
6214 			data[4] = -1;
6215 		}
6216 
6217 		if (s2io_link_test(sp, &data[2]))
6218 			ethtest->flags |= ETH_TEST_FL_FAILED;
6219 
6220 		data[0] = 0;
6221 		data[1] = 0;
6222 		data[3] = 0;
6223 		data[4] = 0;
6224 	}
6225 }
6226 
6227 static void s2io_get_ethtool_stats(struct net_device *dev,
6228 				   struct ethtool_stats *estats,
6229 				   u64 *tmp_stats)
6230 {
6231 	int i = 0, k;
6232 	struct s2io_nic *sp = netdev_priv(dev);
6233 	struct stat_block *stats = sp->mac_control.stats_info;
6234 	struct swStat *swstats = &stats->sw_stat;
6235 	struct xpakStat *xstats = &stats->xpak_stat;
6236 
6237 	s2io_updt_stats(sp);
6238 	tmp_stats[i++] =
6239 		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
6240 		le32_to_cpu(stats->tmac_frms);
6241 	tmp_stats[i++] =
6242 		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6243 		le32_to_cpu(stats->tmac_data_octets);
6244 	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6245 	tmp_stats[i++] =
6246 		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6247 		le32_to_cpu(stats->tmac_mcst_frms);
6248 	tmp_stats[i++] =
6249 		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6250 		le32_to_cpu(stats->tmac_bcst_frms);
6251 	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6252 	tmp_stats[i++] =
6253 		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6254 		le32_to_cpu(stats->tmac_ttl_octets);
6255 	tmp_stats[i++] =
6256 		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6257 		le32_to_cpu(stats->tmac_ucst_frms);
6258 	tmp_stats[i++] =
6259 		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6260 		le32_to_cpu(stats->tmac_nucst_frms);
6261 	tmp_stats[i++] =
6262 		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6263 		le32_to_cpu(stats->tmac_any_err_frms);
6264 	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6265 	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6266 	tmp_stats[i++] =
6267 		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6268 		le32_to_cpu(stats->tmac_vld_ip);
6269 	tmp_stats[i++] =
6270 		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6271 		le32_to_cpu(stats->tmac_drop_ip);
6272 	tmp_stats[i++] =
6273 		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6274 		le32_to_cpu(stats->tmac_icmp);
6275 	tmp_stats[i++] =
6276 		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6277 		le32_to_cpu(stats->tmac_rst_tcp);
6278 	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6279 	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6280 		le32_to_cpu(stats->tmac_udp);
6281 	tmp_stats[i++] =
6282 		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6283 		le32_to_cpu(stats->rmac_vld_frms);
6284 	tmp_stats[i++] =
6285 		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6286 		le32_to_cpu(stats->rmac_data_octets);
6287 	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6288 	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6289 	tmp_stats[i++] =
6290 		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6291 		le32_to_cpu(stats->rmac_vld_mcst_frms);
6292 	tmp_stats[i++] =
6293 		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6294 		le32_to_cpu(stats->rmac_vld_bcst_frms);
6295 	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6296 	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6297 	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6298 	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6299 	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6300 	tmp_stats[i++] =
6301 		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6302 		le32_to_cpu(stats->rmac_ttl_octets);
6303 	tmp_stats[i++] =
6304 		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6305 		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
6306 	tmp_stats[i++] =
6307 		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6308 		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6309 	tmp_stats[i++] =
6310 		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6311 		le32_to_cpu(stats->rmac_discarded_frms);
6312 	tmp_stats[i++] =
6313 		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6314 		<< 32 | le32_to_cpu(stats->rmac_drop_events);
6315 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6316 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6317 	tmp_stats[i++] =
6318 		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6319 		le32_to_cpu(stats->rmac_usized_frms);
6320 	tmp_stats[i++] =
6321 		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6322 		le32_to_cpu(stats->rmac_osized_frms);
6323 	tmp_stats[i++] =
6324 		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6325 		le32_to_cpu(stats->rmac_frag_frms);
6326 	tmp_stats[i++] =
6327 		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6328 		le32_to_cpu(stats->rmac_jabber_frms);
6329 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6330 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6331 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6332 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6333 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6334 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6335 	tmp_stats[i++] =
6336 		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6337 		le32_to_cpu(stats->rmac_ip);
6338 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6339 	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6340 	tmp_stats[i++] =
6341 		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6342 		le32_to_cpu(stats->rmac_drop_ip);
6343 	tmp_stats[i++] =
6344 		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6345 		le32_to_cpu(stats->rmac_icmp);
6346 	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6347 	tmp_stats[i++] =
6348 		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6349 		le32_to_cpu(stats->rmac_udp);
6350 	tmp_stats[i++] =
6351 		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6352 		le32_to_cpu(stats->rmac_err_drp_udp);
6353 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6354 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6355 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6356 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6357 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6358 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6359 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6360 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6361 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6362 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6363 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6364 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6365 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6366 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6367 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6368 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6369 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6370 	tmp_stats[i++] =
6371 		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6372 		le32_to_cpu(stats->rmac_pause_cnt);
6373 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6374 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6375 	tmp_stats[i++] =
6376 		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6377 		le32_to_cpu(stats->rmac_accepted_ip);
6378 	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6379 	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6380 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6381 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6382 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6383 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6384 	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6385 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6386 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6387 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6388 	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6389 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6390 	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6391 	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6392 	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6393 	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6394 	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6395 	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6396 	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6397 
6398 	/* Enhanced statistics exist only for Hercules */
6399 	if (sp->device_type == XFRAME_II_DEVICE) {
6400 		tmp_stats[i++] =
6401 			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6402 		tmp_stats[i++] =
6403 			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6404 		tmp_stats[i++] =
6405 			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6406 		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6407 		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6408 		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6409 		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6410 		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6411 		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6412 		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6413 		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6414 		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6415 		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6416 		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6417 		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6418 		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6419 	}
6420 
6421 	tmp_stats[i++] = 0;
6422 	tmp_stats[i++] = swstats->single_ecc_errs;
6423 	tmp_stats[i++] = swstats->double_ecc_errs;
6424 	tmp_stats[i++] = swstats->parity_err_cnt;
6425 	tmp_stats[i++] = swstats->serious_err_cnt;
6426 	tmp_stats[i++] = swstats->soft_reset_cnt;
6427 	tmp_stats[i++] = swstats->fifo_full_cnt;
6428 	for (k = 0; k < MAX_RX_RINGS; k++)
6429 		tmp_stats[i++] = swstats->ring_full_cnt[k];
6430 	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6431 	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6432 	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6433 	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6434 	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6435 	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6436 	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6437 	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6438 	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6439 	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6440 	tmp_stats[i++] = xstats->warn_laser_output_power_high;
6441 	tmp_stats[i++] = xstats->warn_laser_output_power_low;
6442 	tmp_stats[i++] = swstats->clubbed_frms_cnt;
6443 	tmp_stats[i++] = swstats->sending_both;
6444 	tmp_stats[i++] = swstats->outof_sequence_pkts;
6445 	tmp_stats[i++] = swstats->flush_max_pkts;
6446 	if (swstats->num_aggregations) {
6447 		u64 tmp = swstats->sum_avg_pkts_aggregated;
6448 		int count = 0;
6449 		/*
6450 		 * Since 64-bit divide does not work on all platforms,
6451 		 * do repeated subtraction.
6452 		 */
6453 		while (tmp >= swstats->num_aggregations) {
6454 			tmp -= swstats->num_aggregations;
6455 			count++;
6456 		}
6457 		tmp_stats[i++] = count;
6458 	} else
6459 		tmp_stats[i++] = 0;
6460 	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6461 	tmp_stats[i++] = swstats->pci_map_fail_cnt;
6462 	tmp_stats[i++] = swstats->watchdog_timer_cnt;
6463 	tmp_stats[i++] = swstats->mem_allocated;
6464 	tmp_stats[i++] = swstats->mem_freed;
6465 	tmp_stats[i++] = swstats->link_up_cnt;
6466 	tmp_stats[i++] = swstats->link_down_cnt;
6467 	tmp_stats[i++] = swstats->link_up_time;
6468 	tmp_stats[i++] = swstats->link_down_time;
6469 
6470 	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6471 	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6472 	tmp_stats[i++] = swstats->tx_parity_err_cnt;
6473 	tmp_stats[i++] = swstats->tx_link_loss_cnt;
6474 	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6475 
6476 	tmp_stats[i++] = swstats->rx_parity_err_cnt;
6477 	tmp_stats[i++] = swstats->rx_abort_cnt;
6478 	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6479 	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6480 	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6481 	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6482 	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6483 	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6484 	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6485 	tmp_stats[i++] = swstats->tda_err_cnt;
6486 	tmp_stats[i++] = swstats->pfc_err_cnt;
6487 	tmp_stats[i++] = swstats->pcc_err_cnt;
6488 	tmp_stats[i++] = swstats->tti_err_cnt;
6489 	tmp_stats[i++] = swstats->tpa_err_cnt;
6490 	tmp_stats[i++] = swstats->sm_err_cnt;
6491 	tmp_stats[i++] = swstats->lso_err_cnt;
6492 	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6493 	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6494 	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6495 	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6496 	tmp_stats[i++] = swstats->rc_err_cnt;
6497 	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6498 	tmp_stats[i++] = swstats->rpa_err_cnt;
6499 	tmp_stats[i++] = swstats->rda_err_cnt;
6500 	tmp_stats[i++] = swstats->rti_err_cnt;
6501 	tmp_stats[i++] = swstats->mc_err_cnt;
6502 }
6503 
6504 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6505 {
6506 	return XENA_REG_SPACE;
6507 }
6508 
6509 
6510 static int s2io_get_eeprom_len(struct net_device *dev)
6511 {
6512 	return XENA_EEPROM_SPACE;
6513 }
6514 
6515 static int s2io_get_sset_count(struct net_device *dev, int sset)
6516 {
6517 	struct s2io_nic *sp = netdev_priv(dev);
6518 
6519 	switch (sset) {
6520 	case ETH_SS_TEST:
6521 		return S2IO_TEST_LEN;
6522 	case ETH_SS_STATS:
6523 		switch (sp->device_type) {
6524 		case XFRAME_I_DEVICE:
6525 			return XFRAME_I_STAT_LEN;
6526 		case XFRAME_II_DEVICE:
6527 			return XFRAME_II_STAT_LEN;
6528 		default:
6529 			return 0;
6530 		}
6531 	default:
6532 		return -EOPNOTSUPP;
6533 	}
6534 }
6535 
6536 static void s2io_ethtool_get_strings(struct net_device *dev,
6537 				     u32 stringset, u8 *data)
6538 {
6539 	int stat_size = 0;
6540 	struct s2io_nic *sp = netdev_priv(dev);
6541 
6542 	switch (stringset) {
6543 	case ETH_SS_TEST:
6544 		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6545 		break;
6546 	case ETH_SS_STATS:
6547 		stat_size = sizeof(ethtool_xena_stats_keys);
6548 		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6549 		if (sp->device_type == XFRAME_II_DEVICE) {
6550 			memcpy(data + stat_size,
6551 			       &ethtool_enhanced_stats_keys,
6552 			       sizeof(ethtool_enhanced_stats_keys));
6553 			stat_size += sizeof(ethtool_enhanced_stats_keys);
6554 		}
6555 
6556 		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6557 		       sizeof(ethtool_driver_stats_keys));
6558 	}
6559 }
6560 
6561 static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6562 {
6563 	struct s2io_nic *sp = netdev_priv(dev);
6564 	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6565 
6566 	if (changed && netif_running(dev)) {
6567 		int rc;
6568 
6569 		s2io_stop_all_tx_queue(sp);
6570 		s2io_card_down(sp);
6571 		dev->features = features;
6572 		rc = s2io_card_up(sp);
6573 		if (rc)
6574 			s2io_reset(sp);
6575 		else
6576 			s2io_start_all_tx_queue(sp);
6577 
6578 		return rc ? rc : 1;
6579 	}
6580 
6581 	return 0;
6582 }
6583 
6584 static const struct ethtool_ops netdev_ethtool_ops = {
6585 	.get_drvinfo = s2io_ethtool_gdrvinfo,
6586 	.get_regs_len = s2io_ethtool_get_regs_len,
6587 	.get_regs = s2io_ethtool_gregs,
6588 	.get_link = ethtool_op_get_link,
6589 	.get_eeprom_len = s2io_get_eeprom_len,
6590 	.get_eeprom = s2io_ethtool_geeprom,
6591 	.set_eeprom = s2io_ethtool_seeprom,
6592 	.get_ringparam = s2io_ethtool_gringparam,
6593 	.get_pauseparam = s2io_ethtool_getpause_data,
6594 	.set_pauseparam = s2io_ethtool_setpause_data,
6595 	.self_test = s2io_ethtool_test,
6596 	.get_strings = s2io_ethtool_get_strings,
6597 	.set_phys_id = s2io_ethtool_set_led,
6598 	.get_ethtool_stats = s2io_get_ethtool_stats,
6599 	.get_sset_count = s2io_get_sset_count,
6600 	.get_link_ksettings = s2io_ethtool_get_link_ksettings,
6601 	.set_link_ksettings = s2io_ethtool_set_link_ksettings,
6602 };
6603 
6604 /**
6605  *  s2io_ioctl - Entry point for the Ioctl
6606  *  @dev :  Device pointer.
6607  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6608  *  a proprietary structure used to pass information to the driver.
6609  *  @cmd :  This is used to distinguish between the different commands that
6610  *  can be passed to the IOCTL functions.
6611  *  Description:
6612  *  Currently there are no special functionality supported in IOCTL, hence
6613  *  function always return EOPNOTSUPPORTED
6614  */
6615 
6616 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6617 {
6618 	return -EOPNOTSUPP;
6619 }
6620 
6621 /**
6622  *  s2io_change_mtu - entry point to change MTU size for the device.
6623  *   @dev : device pointer.
6624  *   @new_mtu : the new MTU size for the device.
6625  *   Description: A driver entry point to change MTU size for the device.
6626  *   Before changing the MTU the device must be stopped.
6627  *  Return value:
6628  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6629  *   file on failure.
6630  */
6631 
6632 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6633 {
6634 	struct s2io_nic *sp = netdev_priv(dev);
6635 	int ret = 0;
6636 
6637 	dev->mtu = new_mtu;
6638 	if (netif_running(dev)) {
6639 		s2io_stop_all_tx_queue(sp);
6640 		s2io_card_down(sp);
6641 		ret = s2io_card_up(sp);
6642 		if (ret) {
6643 			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6644 				  __func__);
6645 			return ret;
6646 		}
6647 		s2io_wake_all_tx_queue(sp);
6648 	} else { /* Device is down */
6649 		struct XENA_dev_config __iomem *bar0 = sp->bar0;
6650 		u64 val64 = new_mtu;
6651 
6652 		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6653 	}
6654 
6655 	return ret;
6656 }
6657 
6658 /**
6659  * s2io_set_link - Set the LInk status
6660  * @data: long pointer to device private structue
6661  * Description: Sets the link status for the adapter
6662  */
6663 
6664 static void s2io_set_link(struct work_struct *work)
6665 {
6666 	struct s2io_nic *nic = container_of(work, struct s2io_nic,
6667 					    set_link_task);
6668 	struct net_device *dev = nic->dev;
6669 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
6670 	register u64 val64;
6671 	u16 subid;
6672 
6673 	rtnl_lock();
6674 
6675 	if (!netif_running(dev))
6676 		goto out_unlock;
6677 
6678 	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6679 		/* The card is being reset, no point doing anything */
6680 		goto out_unlock;
6681 	}
6682 
6683 	subid = nic->pdev->subsystem_device;
6684 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6685 		/*
6686 		 * Allow a small delay for the NICs self initiated
6687 		 * cleanup to complete.
6688 		 */
6689 		msleep(100);
6690 	}
6691 
6692 	val64 = readq(&bar0->adapter_status);
6693 	if (LINK_IS_UP(val64)) {
6694 		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6695 			if (verify_xena_quiescence(nic)) {
6696 				val64 = readq(&bar0->adapter_control);
6697 				val64 |= ADAPTER_CNTL_EN;
6698 				writeq(val64, &bar0->adapter_control);
6699 				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6700 					    nic->device_type, subid)) {
6701 					val64 = readq(&bar0->gpio_control);
6702 					val64 |= GPIO_CTRL_GPIO_0;
6703 					writeq(val64, &bar0->gpio_control);
6704 					val64 = readq(&bar0->gpio_control);
6705 				} else {
6706 					val64 |= ADAPTER_LED_ON;
6707 					writeq(val64, &bar0->adapter_control);
6708 				}
6709 				nic->device_enabled_once = true;
6710 			} else {
6711 				DBG_PRINT(ERR_DBG,
6712 					  "%s: Error: device is not Quiescent\n",
6713 					  dev->name);
6714 				s2io_stop_all_tx_queue(nic);
6715 			}
6716 		}
6717 		val64 = readq(&bar0->adapter_control);
6718 		val64 |= ADAPTER_LED_ON;
6719 		writeq(val64, &bar0->adapter_control);
6720 		s2io_link(nic, LINK_UP);
6721 	} else {
6722 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6723 						      subid)) {
6724 			val64 = readq(&bar0->gpio_control);
6725 			val64 &= ~GPIO_CTRL_GPIO_0;
6726 			writeq(val64, &bar0->gpio_control);
6727 			val64 = readq(&bar0->gpio_control);
6728 		}
6729 		/* turn off LED */
6730 		val64 = readq(&bar0->adapter_control);
6731 		val64 = val64 & (~ADAPTER_LED_ON);
6732 		writeq(val64, &bar0->adapter_control);
6733 		s2io_link(nic, LINK_DOWN);
6734 	}
6735 	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6736 
6737 out_unlock:
6738 	rtnl_unlock();
6739 }
6740 
6741 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6742 				  struct buffAdd *ba,
6743 				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
6744 				  u64 *temp2, int size)
6745 {
6746 	struct net_device *dev = sp->dev;
6747 	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6748 
6749 	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6750 		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6751 		/* allocate skb */
6752 		if (*skb) {
6753 			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6754 			/*
6755 			 * As Rx frame are not going to be processed,
6756 			 * using same mapped address for the Rxd
6757 			 * buffer pointer
6758 			 */
6759 			rxdp1->Buffer0_ptr = *temp0;
6760 		} else {
6761 			*skb = netdev_alloc_skb(dev, size);
6762 			if (!(*skb)) {
6763 				DBG_PRINT(INFO_DBG,
6764 					  "%s: Out of memory to allocate %s\n",
6765 					  dev->name, "1 buf mode SKBs");
6766 				stats->mem_alloc_fail_cnt++;
6767 				return -ENOMEM ;
6768 			}
6769 			stats->mem_allocated += (*skb)->truesize;
6770 			/* storing the mapped addr in a temp variable
6771 			 * such it will be used for next rxd whose
6772 			 * Host Control is NULL
6773 			 */
6774 			rxdp1->Buffer0_ptr = *temp0 =
6775 				pci_map_single(sp->pdev, (*skb)->data,
6776 					       size - NET_IP_ALIGN,
6777 					       PCI_DMA_FROMDEVICE);
6778 			if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6779 				goto memalloc_failed;
6780 			rxdp->Host_Control = (unsigned long) (*skb);
6781 		}
6782 	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6783 		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6784 		/* Two buffer Mode */
6785 		if (*skb) {
6786 			rxdp3->Buffer2_ptr = *temp2;
6787 			rxdp3->Buffer0_ptr = *temp0;
6788 			rxdp3->Buffer1_ptr = *temp1;
6789 		} else {
6790 			*skb = netdev_alloc_skb(dev, size);
6791 			if (!(*skb)) {
6792 				DBG_PRINT(INFO_DBG,
6793 					  "%s: Out of memory to allocate %s\n",
6794 					  dev->name,
6795 					  "2 buf mode SKBs");
6796 				stats->mem_alloc_fail_cnt++;
6797 				return -ENOMEM;
6798 			}
6799 			stats->mem_allocated += (*skb)->truesize;
6800 			rxdp3->Buffer2_ptr = *temp2 =
6801 				pci_map_single(sp->pdev, (*skb)->data,
6802 					       dev->mtu + 4,
6803 					       PCI_DMA_FROMDEVICE);
6804 			if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6805 				goto memalloc_failed;
6806 			rxdp3->Buffer0_ptr = *temp0 =
6807 				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6808 					       PCI_DMA_FROMDEVICE);
6809 			if (pci_dma_mapping_error(sp->pdev,
6810 						  rxdp3->Buffer0_ptr)) {
6811 				pci_unmap_single(sp->pdev,
6812 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6813 						 dev->mtu + 4,
6814 						 PCI_DMA_FROMDEVICE);
6815 				goto memalloc_failed;
6816 			}
6817 			rxdp->Host_Control = (unsigned long) (*skb);
6818 
6819 			/* Buffer-1 will be dummy buffer not used */
6820 			rxdp3->Buffer1_ptr = *temp1 =
6821 				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6822 					       PCI_DMA_FROMDEVICE);
6823 			if (pci_dma_mapping_error(sp->pdev,
6824 						  rxdp3->Buffer1_ptr)) {
6825 				pci_unmap_single(sp->pdev,
6826 						 (dma_addr_t)rxdp3->Buffer0_ptr,
6827 						 BUF0_LEN, PCI_DMA_FROMDEVICE);
6828 				pci_unmap_single(sp->pdev,
6829 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6830 						 dev->mtu + 4,
6831 						 PCI_DMA_FROMDEVICE);
6832 				goto memalloc_failed;
6833 			}
6834 		}
6835 	}
6836 	return 0;
6837 
6838 memalloc_failed:
6839 	stats->pci_map_fail_cnt++;
6840 	stats->mem_freed += (*skb)->truesize;
6841 	dev_kfree_skb(*skb);
6842 	return -ENOMEM;
6843 }
6844 
6845 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6846 				int size)
6847 {
6848 	struct net_device *dev = sp->dev;
6849 	if (sp->rxd_mode == RXD_MODE_1) {
6850 		rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6851 	} else if (sp->rxd_mode == RXD_MODE_3B) {
6852 		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6853 		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6854 		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6855 	}
6856 }
6857 
6858 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6859 {
6860 	int i, j, k, blk_cnt = 0, size;
6861 	struct config_param *config = &sp->config;
6862 	struct mac_info *mac_control = &sp->mac_control;
6863 	struct net_device *dev = sp->dev;
6864 	struct RxD_t *rxdp = NULL;
6865 	struct sk_buff *skb = NULL;
6866 	struct buffAdd *ba = NULL;
6867 	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6868 
6869 	/* Calculate the size based on ring mode */
6870 	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6871 		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6872 	if (sp->rxd_mode == RXD_MODE_1)
6873 		size += NET_IP_ALIGN;
6874 	else if (sp->rxd_mode == RXD_MODE_3B)
6875 		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6876 
6877 	for (i = 0; i < config->rx_ring_num; i++) {
6878 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6879 		struct ring_info *ring = &mac_control->rings[i];
6880 
6881 		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6882 
6883 		for (j = 0; j < blk_cnt; j++) {
6884 			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6885 				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6886 				if (sp->rxd_mode == RXD_MODE_3B)
6887 					ba = &ring->ba[j][k];
6888 				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6889 							   &temp0_64,
6890 							   &temp1_64,
6891 							   &temp2_64,
6892 							   size) == -ENOMEM) {
6893 					return 0;
6894 				}
6895 
6896 				set_rxd_buffer_size(sp, rxdp, size);
6897 				dma_wmb();
6898 				/* flip the Ownership bit to Hardware */
6899 				rxdp->Control_1 |= RXD_OWN_XENA;
6900 			}
6901 		}
6902 	}
6903 	return 0;
6904 
6905 }
6906 
6907 static int s2io_add_isr(struct s2io_nic *sp)
6908 {
6909 	int ret = 0;
6910 	struct net_device *dev = sp->dev;
6911 	int err = 0;
6912 
6913 	if (sp->config.intr_type == MSI_X)
6914 		ret = s2io_enable_msi_x(sp);
6915 	if (ret) {
6916 		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6917 		sp->config.intr_type = INTA;
6918 	}
6919 
6920 	/*
6921 	 * Store the values of the MSIX table in
6922 	 * the struct s2io_nic structure
6923 	 */
6924 	store_xmsi_data(sp);
6925 
6926 	/* After proper initialization of H/W, register ISR */
6927 	if (sp->config.intr_type == MSI_X) {
6928 		int i, msix_rx_cnt = 0;
6929 
6930 		for (i = 0; i < sp->num_entries; i++) {
6931 			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6932 				if (sp->s2io_entries[i].type ==
6933 				    MSIX_RING_TYPE) {
6934 					snprintf(sp->desc[i],
6935 						sizeof(sp->desc[i]),
6936 						"%s:MSI-X-%d-RX",
6937 						dev->name, i);
6938 					err = request_irq(sp->entries[i].vector,
6939 							  s2io_msix_ring_handle,
6940 							  0,
6941 							  sp->desc[i],
6942 							  sp->s2io_entries[i].arg);
6943 				} else if (sp->s2io_entries[i].type ==
6944 					   MSIX_ALARM_TYPE) {
6945 					snprintf(sp->desc[i],
6946 						sizeof(sp->desc[i]),
6947 						"%s:MSI-X-%d-TX",
6948 						dev->name, i);
6949 					err = request_irq(sp->entries[i].vector,
6950 							  s2io_msix_fifo_handle,
6951 							  0,
6952 							  sp->desc[i],
6953 							  sp->s2io_entries[i].arg);
6954 
6955 				}
6956 				/* if either data or addr is zero print it. */
6957 				if (!(sp->msix_info[i].addr &&
6958 				      sp->msix_info[i].data)) {
6959 					DBG_PRINT(ERR_DBG,
6960 						  "%s @Addr:0x%llx Data:0x%llx\n",
6961 						  sp->desc[i],
6962 						  (unsigned long long)
6963 						  sp->msix_info[i].addr,
6964 						  (unsigned long long)
6965 						  ntohl(sp->msix_info[i].data));
6966 				} else
6967 					msix_rx_cnt++;
6968 				if (err) {
6969 					remove_msix_isr(sp);
6970 
6971 					DBG_PRINT(ERR_DBG,
6972 						  "%s:MSI-X-%d registration "
6973 						  "failed\n", dev->name, i);
6974 
6975 					DBG_PRINT(ERR_DBG,
6976 						  "%s: Defaulting to INTA\n",
6977 						  dev->name);
6978 					sp->config.intr_type = INTA;
6979 					break;
6980 				}
6981 				sp->s2io_entries[i].in_use =
6982 					MSIX_REGISTERED_SUCCESS;
6983 			}
6984 		}
6985 		if (!err) {
6986 			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
6987 			DBG_PRINT(INFO_DBG,
6988 				  "MSI-X-TX entries enabled through alarm vector\n");
6989 		}
6990 	}
6991 	if (sp->config.intr_type == INTA) {
6992 		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
6993 				  sp->name, dev);
6994 		if (err) {
6995 			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6996 				  dev->name);
6997 			return -1;
6998 		}
6999 	}
7000 	return 0;
7001 }
7002 
7003 static void s2io_rem_isr(struct s2io_nic *sp)
7004 {
7005 	if (sp->config.intr_type == MSI_X)
7006 		remove_msix_isr(sp);
7007 	else
7008 		remove_inta_isr(sp);
7009 }
7010 
7011 static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7012 {
7013 	int cnt = 0;
7014 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
7015 	register u64 val64 = 0;
7016 	struct config_param *config;
7017 	config = &sp->config;
7018 
7019 	if (!is_s2io_card_up(sp))
7020 		return;
7021 
7022 	del_timer_sync(&sp->alarm_timer);
7023 	/* If s2io_set_link task is executing, wait till it completes. */
7024 	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7025 		msleep(50);
7026 	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7027 
7028 	/* Disable napi */
7029 	if (sp->config.napi) {
7030 		int off = 0;
7031 		if (config->intr_type ==  MSI_X) {
7032 			for (; off < sp->config.rx_ring_num; off++)
7033 				napi_disable(&sp->mac_control.rings[off].napi);
7034 		}
7035 		else
7036 			napi_disable(&sp->napi);
7037 	}
7038 
7039 	/* disable Tx and Rx traffic on the NIC */
7040 	if (do_io)
7041 		stop_nic(sp);
7042 
7043 	s2io_rem_isr(sp);
7044 
7045 	/* stop the tx queue, indicate link down */
7046 	s2io_link(sp, LINK_DOWN);
7047 
7048 	/* Check if the device is Quiescent and then Reset the NIC */
7049 	while (do_io) {
7050 		/* As per the HW requirement we need to replenish the
7051 		 * receive buffer to avoid the ring bump. Since there is
7052 		 * no intention of processing the Rx frame at this pointwe are
7053 		 * just setting the ownership bit of rxd in Each Rx
7054 		 * ring to HW and set the appropriate buffer size
7055 		 * based on the ring mode
7056 		 */
7057 		rxd_owner_bit_reset(sp);
7058 
7059 		val64 = readq(&bar0->adapter_status);
7060 		if (verify_xena_quiescence(sp)) {
7061 			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7062 				break;
7063 		}
7064 
7065 		msleep(50);
7066 		cnt++;
7067 		if (cnt == 10) {
7068 			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7069 				  "adapter status reads 0x%llx\n",
7070 				  (unsigned long long)val64);
7071 			break;
7072 		}
7073 	}
7074 	if (do_io)
7075 		s2io_reset(sp);
7076 
7077 	/* Free all Tx buffers */
7078 	free_tx_buffers(sp);
7079 
7080 	/* Free all Rx buffers */
7081 	free_rx_buffers(sp);
7082 
7083 	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7084 }
7085 
7086 static void s2io_card_down(struct s2io_nic *sp)
7087 {
7088 	do_s2io_card_down(sp, 1);
7089 }
7090 
7091 static int s2io_card_up(struct s2io_nic *sp)
7092 {
7093 	int i, ret = 0;
7094 	struct config_param *config;
7095 	struct mac_info *mac_control;
7096 	struct net_device *dev = sp->dev;
7097 	u16 interruptible;
7098 
7099 	/* Initialize the H/W I/O registers */
7100 	ret = init_nic(sp);
7101 	if (ret != 0) {
7102 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7103 			  dev->name);
7104 		if (ret != -EIO)
7105 			s2io_reset(sp);
7106 		return ret;
7107 	}
7108 
7109 	/*
7110 	 * Initializing the Rx buffers. For now we are considering only 1
7111 	 * Rx ring and initializing buffers into 30 Rx blocks
7112 	 */
7113 	config = &sp->config;
7114 	mac_control = &sp->mac_control;
7115 
7116 	for (i = 0; i < config->rx_ring_num; i++) {
7117 		struct ring_info *ring = &mac_control->rings[i];
7118 
7119 		ring->mtu = dev->mtu;
7120 		ring->lro = !!(dev->features & NETIF_F_LRO);
7121 		ret = fill_rx_buffers(sp, ring, 1);
7122 		if (ret) {
7123 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7124 				  dev->name);
7125 			s2io_reset(sp);
7126 			free_rx_buffers(sp);
7127 			return -ENOMEM;
7128 		}
7129 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7130 			  ring->rx_bufs_left);
7131 	}
7132 
7133 	/* Initialise napi */
7134 	if (config->napi) {
7135 		if (config->intr_type ==  MSI_X) {
7136 			for (i = 0; i < sp->config.rx_ring_num; i++)
7137 				napi_enable(&sp->mac_control.rings[i].napi);
7138 		} else {
7139 			napi_enable(&sp->napi);
7140 		}
7141 	}
7142 
7143 	/* Maintain the state prior to the open */
7144 	if (sp->promisc_flg)
7145 		sp->promisc_flg = 0;
7146 	if (sp->m_cast_flg) {
7147 		sp->m_cast_flg = 0;
7148 		sp->all_multi_pos = 0;
7149 	}
7150 
7151 	/* Setting its receive mode */
7152 	s2io_set_multicast(dev);
7153 
7154 	if (dev->features & NETIF_F_LRO) {
7155 		/* Initialize max aggregatable pkts per session based on MTU */
7156 		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7157 		/* Check if we can use (if specified) user provided value */
7158 		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7159 			sp->lro_max_aggr_per_sess = lro_max_pkts;
7160 	}
7161 
7162 	/* Enable Rx Traffic and interrupts on the NIC */
7163 	if (start_nic(sp)) {
7164 		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7165 		s2io_reset(sp);
7166 		free_rx_buffers(sp);
7167 		return -ENODEV;
7168 	}
7169 
7170 	/* Add interrupt service routine */
7171 	if (s2io_add_isr(sp) != 0) {
7172 		if (sp->config.intr_type == MSI_X)
7173 			s2io_rem_isr(sp);
7174 		s2io_reset(sp);
7175 		free_rx_buffers(sp);
7176 		return -ENODEV;
7177 	}
7178 
7179 	timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
7180 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
7181 
7182 	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7183 
7184 	/*  Enable select interrupts */
7185 	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7186 	if (sp->config.intr_type != INTA) {
7187 		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7188 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7189 	} else {
7190 		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7191 		interruptible |= TX_PIC_INTR;
7192 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7193 	}
7194 
7195 	return 0;
7196 }
7197 
7198 /**
7199  * s2io_restart_nic - Resets the NIC.
7200  * @data : long pointer to the device private structure
7201  * Description:
7202  * This function is scheduled to be run by the s2io_tx_watchdog
7203  * function after 0.5 secs to reset the NIC. The idea is to reduce
7204  * the run time of the watch dog routine which is run holding a
7205  * spin lock.
7206  */
7207 
7208 static void s2io_restart_nic(struct work_struct *work)
7209 {
7210 	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7211 	struct net_device *dev = sp->dev;
7212 
7213 	rtnl_lock();
7214 
7215 	if (!netif_running(dev))
7216 		goto out_unlock;
7217 
7218 	s2io_card_down(sp);
7219 	if (s2io_card_up(sp)) {
7220 		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7221 	}
7222 	s2io_wake_all_tx_queue(sp);
7223 	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7224 out_unlock:
7225 	rtnl_unlock();
7226 }
7227 
7228 /**
7229  *  s2io_tx_watchdog - Watchdog for transmit side.
7230  *  @dev : Pointer to net device structure
7231  *  Description:
7232  *  This function is triggered if the Tx Queue is stopped
7233  *  for a pre-defined amount of time when the Interface is still up.
7234  *  If the Interface is jammed in such a situation, the hardware is
7235  *  reset (by s2io_close) and restarted again (by s2io_open) to
7236  *  overcome any problem that might have been caused in the hardware.
7237  *  Return value:
7238  *  void
7239  */
7240 
7241 static void s2io_tx_watchdog(struct net_device *dev)
7242 {
7243 	struct s2io_nic *sp = netdev_priv(dev);
7244 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7245 
7246 	if (netif_carrier_ok(dev)) {
7247 		swstats->watchdog_timer_cnt++;
7248 		schedule_work(&sp->rst_timer_task);
7249 		swstats->soft_reset_cnt++;
7250 	}
7251 }
7252 
7253 /**
7254  *   rx_osm_handler - To perform some OS related operations on SKB.
7255  *   @sp: private member of the device structure,pointer to s2io_nic structure.
7256  *   @skb : the socket buffer pointer.
7257  *   @len : length of the packet
7258  *   @cksum : FCS checksum of the frame.
7259  *   @ring_no : the ring from which this RxD was extracted.
7260  *   Description:
7261  *   This function is called by the Rx interrupt serivce routine to perform
7262  *   some OS related operations on the SKB before passing it to the upper
7263  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7264  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7265  *   to the upper layer. If the checksum is wrong, it increments the Rx
7266  *   packet error count, frees the SKB and returns error.
7267  *   Return value:
7268  *   SUCCESS on success and -1 on failure.
7269  */
7270 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7271 {
7272 	struct s2io_nic *sp = ring_data->nic;
7273 	struct net_device *dev = ring_data->dev;
7274 	struct sk_buff *skb = (struct sk_buff *)
7275 		((unsigned long)rxdp->Host_Control);
7276 	int ring_no = ring_data->ring_no;
7277 	u16 l3_csum, l4_csum;
7278 	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7279 	struct lro *uninitialized_var(lro);
7280 	u8 err_mask;
7281 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7282 
7283 	skb->dev = dev;
7284 
7285 	if (err) {
7286 		/* Check for parity error */
7287 		if (err & 0x1)
7288 			swstats->parity_err_cnt++;
7289 
7290 		err_mask = err >> 48;
7291 		switch (err_mask) {
7292 		case 1:
7293 			swstats->rx_parity_err_cnt++;
7294 			break;
7295 
7296 		case 2:
7297 			swstats->rx_abort_cnt++;
7298 			break;
7299 
7300 		case 3:
7301 			swstats->rx_parity_abort_cnt++;
7302 			break;
7303 
7304 		case 4:
7305 			swstats->rx_rda_fail_cnt++;
7306 			break;
7307 
7308 		case 5:
7309 			swstats->rx_unkn_prot_cnt++;
7310 			break;
7311 
7312 		case 6:
7313 			swstats->rx_fcs_err_cnt++;
7314 			break;
7315 
7316 		case 7:
7317 			swstats->rx_buf_size_err_cnt++;
7318 			break;
7319 
7320 		case 8:
7321 			swstats->rx_rxd_corrupt_cnt++;
7322 			break;
7323 
7324 		case 15:
7325 			swstats->rx_unkn_err_cnt++;
7326 			break;
7327 		}
7328 		/*
7329 		 * Drop the packet if bad transfer code. Exception being
7330 		 * 0x5, which could be due to unsupported IPv6 extension header.
7331 		 * In this case, we let stack handle the packet.
7332 		 * Note that in this case, since checksum will be incorrect,
7333 		 * stack will validate the same.
7334 		 */
7335 		if (err_mask != 0x5) {
7336 			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7337 				  dev->name, err_mask);
7338 			dev->stats.rx_crc_errors++;
7339 			swstats->mem_freed
7340 				+= skb->truesize;
7341 			dev_kfree_skb(skb);
7342 			ring_data->rx_bufs_left -= 1;
7343 			rxdp->Host_Control = 0;
7344 			return 0;
7345 		}
7346 	}
7347 
7348 	rxdp->Host_Control = 0;
7349 	if (sp->rxd_mode == RXD_MODE_1) {
7350 		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7351 
7352 		skb_put(skb, len);
7353 	} else if (sp->rxd_mode == RXD_MODE_3B) {
7354 		int get_block = ring_data->rx_curr_get_info.block_index;
7355 		int get_off = ring_data->rx_curr_get_info.offset;
7356 		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7357 		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7358 		unsigned char *buff = skb_push(skb, buf0_len);
7359 
7360 		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7361 		memcpy(buff, ba->ba_0, buf0_len);
7362 		skb_put(skb, buf2_len);
7363 	}
7364 
7365 	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7366 	    ((!ring_data->lro) ||
7367 	     (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
7368 	    (dev->features & NETIF_F_RXCSUM)) {
7369 		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7370 		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7371 		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7372 			/*
7373 			 * NIC verifies if the Checksum of the received
7374 			 * frame is Ok or not and accordingly returns
7375 			 * a flag in the RxD.
7376 			 */
7377 			skb->ip_summed = CHECKSUM_UNNECESSARY;
7378 			if (ring_data->lro) {
7379 				u32 tcp_len = 0;
7380 				u8 *tcp;
7381 				int ret = 0;
7382 
7383 				ret = s2io_club_tcp_session(ring_data,
7384 							    skb->data, &tcp,
7385 							    &tcp_len, &lro,
7386 							    rxdp, sp);
7387 				switch (ret) {
7388 				case 3: /* Begin anew */
7389 					lro->parent = skb;
7390 					goto aggregate;
7391 				case 1: /* Aggregate */
7392 					lro_append_pkt(sp, lro, skb, tcp_len);
7393 					goto aggregate;
7394 				case 4: /* Flush session */
7395 					lro_append_pkt(sp, lro, skb, tcp_len);
7396 					queue_rx_frame(lro->parent,
7397 						       lro->vlan_tag);
7398 					clear_lro_session(lro);
7399 					swstats->flush_max_pkts++;
7400 					goto aggregate;
7401 				case 2: /* Flush both */
7402 					lro->parent->data_len = lro->frags_len;
7403 					swstats->sending_both++;
7404 					queue_rx_frame(lro->parent,
7405 						       lro->vlan_tag);
7406 					clear_lro_session(lro);
7407 					goto send_up;
7408 				case 0: /* sessions exceeded */
7409 				case -1: /* non-TCP or not L2 aggregatable */
7410 				case 5: /*
7411 					 * First pkt in session not
7412 					 * L3/L4 aggregatable
7413 					 */
7414 					break;
7415 				default:
7416 					DBG_PRINT(ERR_DBG,
7417 						  "%s: Samadhana!!\n",
7418 						  __func__);
7419 					BUG();
7420 				}
7421 			}
7422 		} else {
7423 			/*
7424 			 * Packet with erroneous checksum, let the
7425 			 * upper layers deal with it.
7426 			 */
7427 			skb_checksum_none_assert(skb);
7428 		}
7429 	} else
7430 		skb_checksum_none_assert(skb);
7431 
7432 	swstats->mem_freed += skb->truesize;
7433 send_up:
7434 	skb_record_rx_queue(skb, ring_no);
7435 	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7436 aggregate:
7437 	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7438 	return SUCCESS;
7439 }
7440 
7441 /**
7442  *  s2io_link - stops/starts the Tx queue.
7443  *  @sp : private member of the device structure, which is a pointer to the
7444  *  s2io_nic structure.
7445  *  @link : inidicates whether link is UP/DOWN.
7446  *  Description:
7447  *  This function stops/starts the Tx queue depending on whether the link
7448  *  status of the NIC is is down or up. This is called by the Alarm
7449  *  interrupt handler whenever a link change interrupt comes up.
7450  *  Return value:
7451  *  void.
7452  */
7453 
7454 static void s2io_link(struct s2io_nic *sp, int link)
7455 {
7456 	struct net_device *dev = sp->dev;
7457 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7458 
7459 	if (link != sp->last_link_state) {
7460 		init_tti(sp, link);
7461 		if (link == LINK_DOWN) {
7462 			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7463 			s2io_stop_all_tx_queue(sp);
7464 			netif_carrier_off(dev);
7465 			if (swstats->link_up_cnt)
7466 				swstats->link_up_time =
7467 					jiffies - sp->start_time;
7468 			swstats->link_down_cnt++;
7469 		} else {
7470 			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7471 			if (swstats->link_down_cnt)
7472 				swstats->link_down_time =
7473 					jiffies - sp->start_time;
7474 			swstats->link_up_cnt++;
7475 			netif_carrier_on(dev);
7476 			s2io_wake_all_tx_queue(sp);
7477 		}
7478 	}
7479 	sp->last_link_state = link;
7480 	sp->start_time = jiffies;
7481 }
7482 
7483 /**
7484  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7485  *  @sp : private member of the device structure, which is a pointer to the
7486  *  s2io_nic structure.
7487  *  Description:
7488  *  This function initializes a few of the PCI and PCI-X configuration registers
7489  *  with recommended values.
7490  *  Return value:
7491  *  void
7492  */
7493 
7494 static void s2io_init_pci(struct s2io_nic *sp)
7495 {
7496 	u16 pci_cmd = 0, pcix_cmd = 0;
7497 
7498 	/* Enable Data Parity Error Recovery in PCI-X command register. */
7499 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7500 			     &(pcix_cmd));
7501 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7502 			      (pcix_cmd | 1));
7503 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7504 			     &(pcix_cmd));
7505 
7506 	/* Set the PErr Response bit in PCI command register. */
7507 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7508 	pci_write_config_word(sp->pdev, PCI_COMMAND,
7509 			      (pci_cmd | PCI_COMMAND_PARITY));
7510 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7511 }
7512 
7513 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7514 			    u8 *dev_multiq)
7515 {
7516 	int i;
7517 
7518 	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7519 		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7520 			  "(%d) not supported\n", tx_fifo_num);
7521 
7522 		if (tx_fifo_num < 1)
7523 			tx_fifo_num = 1;
7524 		else
7525 			tx_fifo_num = MAX_TX_FIFOS;
7526 
7527 		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7528 	}
7529 
7530 	if (multiq)
7531 		*dev_multiq = multiq;
7532 
7533 	if (tx_steering_type && (1 == tx_fifo_num)) {
7534 		if (tx_steering_type != TX_DEFAULT_STEERING)
7535 			DBG_PRINT(ERR_DBG,
7536 				  "Tx steering is not supported with "
7537 				  "one fifo. Disabling Tx steering.\n");
7538 		tx_steering_type = NO_STEERING;
7539 	}
7540 
7541 	if ((tx_steering_type < NO_STEERING) ||
7542 	    (tx_steering_type > TX_DEFAULT_STEERING)) {
7543 		DBG_PRINT(ERR_DBG,
7544 			  "Requested transmit steering not supported\n");
7545 		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7546 		tx_steering_type = NO_STEERING;
7547 	}
7548 
7549 	if (rx_ring_num > MAX_RX_RINGS) {
7550 		DBG_PRINT(ERR_DBG,
7551 			  "Requested number of rx rings not supported\n");
7552 		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7553 			  MAX_RX_RINGS);
7554 		rx_ring_num = MAX_RX_RINGS;
7555 	}
7556 
7557 	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7558 		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7559 			  "Defaulting to INTA\n");
7560 		*dev_intr_type = INTA;
7561 	}
7562 
7563 	if ((*dev_intr_type == MSI_X) &&
7564 	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7565 	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7566 		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7567 			  "Defaulting to INTA\n");
7568 		*dev_intr_type = INTA;
7569 	}
7570 
7571 	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7572 		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7573 		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7574 		rx_ring_mode = 1;
7575 	}
7576 
7577 	for (i = 0; i < MAX_RX_RINGS; i++)
7578 		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7579 			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7580 				  "supported\nDefaulting to %d\n",
7581 				  MAX_RX_BLOCKS_PER_RING);
7582 			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7583 		}
7584 
7585 	return SUCCESS;
7586 }
7587 
7588 /**
7589  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7590  * or Traffic class respectively.
7591  * @nic: device private variable
7592  * Description: The function configures the receive steering to
7593  * desired receive ring.
7594  * Return Value:  SUCCESS on success and
7595  * '-1' on failure (endian settings incorrect).
7596  */
7597 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7598 {
7599 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
7600 	register u64 val64 = 0;
7601 
7602 	if (ds_codepoint > 63)
7603 		return FAILURE;
7604 
7605 	val64 = RTS_DS_MEM_DATA(ring);
7606 	writeq(val64, &bar0->rts_ds_mem_data);
7607 
7608 	val64 = RTS_DS_MEM_CTRL_WE |
7609 		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7610 		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7611 
7612 	writeq(val64, &bar0->rts_ds_mem_ctrl);
7613 
7614 	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7615 				     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7616 				     S2IO_BIT_RESET);
7617 }
7618 
7619 static const struct net_device_ops s2io_netdev_ops = {
7620 	.ndo_open	        = s2io_open,
7621 	.ndo_stop	        = s2io_close,
7622 	.ndo_get_stats	        = s2io_get_stats,
7623 	.ndo_start_xmit    	= s2io_xmit,
7624 	.ndo_validate_addr	= eth_validate_addr,
7625 	.ndo_set_rx_mode	= s2io_set_multicast,
7626 	.ndo_do_ioctl	   	= s2io_ioctl,
7627 	.ndo_set_mac_address    = s2io_set_mac_addr,
7628 	.ndo_change_mtu	   	= s2io_change_mtu,
7629 	.ndo_set_features	= s2io_set_features,
7630 	.ndo_tx_timeout	   	= s2io_tx_watchdog,
7631 #ifdef CONFIG_NET_POLL_CONTROLLER
7632 	.ndo_poll_controller    = s2io_netpoll,
7633 #endif
7634 };
7635 
7636 /**
7637  *  s2io_init_nic - Initialization of the adapter .
7638  *  @pdev : structure containing the PCI related information of the device.
7639  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7640  *  Description:
7641  *  The function initializes an adapter identified by the pci_dec structure.
7642  *  All OS related initialization including memory and device structure and
7643  *  initlaization of the device private variable is done. Also the swapper
7644  *  control register is initialized to enable read and write into the I/O
7645  *  registers of the device.
7646  *  Return value:
7647  *  returns 0 on success and negative on failure.
7648  */
7649 
7650 static int
7651 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7652 {
7653 	struct s2io_nic *sp;
7654 	struct net_device *dev;
7655 	int i, j, ret;
7656 	int dma_flag = false;
7657 	u32 mac_up, mac_down;
7658 	u64 val64 = 0, tmp64 = 0;
7659 	struct XENA_dev_config __iomem *bar0 = NULL;
7660 	u16 subid;
7661 	struct config_param *config;
7662 	struct mac_info *mac_control;
7663 	int mode;
7664 	u8 dev_intr_type = intr_type;
7665 	u8 dev_multiq = 0;
7666 
7667 	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7668 	if (ret)
7669 		return ret;
7670 
7671 	ret = pci_enable_device(pdev);
7672 	if (ret) {
7673 		DBG_PRINT(ERR_DBG,
7674 			  "%s: pci_enable_device failed\n", __func__);
7675 		return ret;
7676 	}
7677 
7678 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7679 		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7680 		dma_flag = true;
7681 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
7682 			DBG_PRINT(ERR_DBG,
7683 				  "Unable to obtain 64bit DMA "
7684 				  "for consistent allocations\n");
7685 			pci_disable_device(pdev);
7686 			return -ENOMEM;
7687 		}
7688 	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
7689 		DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7690 	} else {
7691 		pci_disable_device(pdev);
7692 		return -ENOMEM;
7693 	}
7694 	ret = pci_request_regions(pdev, s2io_driver_name);
7695 	if (ret) {
7696 		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7697 			  __func__, ret);
7698 		pci_disable_device(pdev);
7699 		return -ENODEV;
7700 	}
7701 	if (dev_multiq)
7702 		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7703 	else
7704 		dev = alloc_etherdev(sizeof(struct s2io_nic));
7705 	if (dev == NULL) {
7706 		pci_disable_device(pdev);
7707 		pci_release_regions(pdev);
7708 		return -ENODEV;
7709 	}
7710 
7711 	pci_set_master(pdev);
7712 	pci_set_drvdata(pdev, dev);
7713 	SET_NETDEV_DEV(dev, &pdev->dev);
7714 
7715 	/*  Private member variable initialized to s2io NIC structure */
7716 	sp = netdev_priv(dev);
7717 	sp->dev = dev;
7718 	sp->pdev = pdev;
7719 	sp->high_dma_flag = dma_flag;
7720 	sp->device_enabled_once = false;
7721 	if (rx_ring_mode == 1)
7722 		sp->rxd_mode = RXD_MODE_1;
7723 	if (rx_ring_mode == 2)
7724 		sp->rxd_mode = RXD_MODE_3B;
7725 
7726 	sp->config.intr_type = dev_intr_type;
7727 
7728 	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7729 	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7730 		sp->device_type = XFRAME_II_DEVICE;
7731 	else
7732 		sp->device_type = XFRAME_I_DEVICE;
7733 
7734 
7735 	/* Initialize some PCI/PCI-X fields of the NIC. */
7736 	s2io_init_pci(sp);
7737 
7738 	/*
7739 	 * Setting the device configuration parameters.
7740 	 * Most of these parameters can be specified by the user during
7741 	 * module insertion as they are module loadable parameters. If
7742 	 * these parameters are not not specified during load time, they
7743 	 * are initialized with default values.
7744 	 */
7745 	config = &sp->config;
7746 	mac_control = &sp->mac_control;
7747 
7748 	config->napi = napi;
7749 	config->tx_steering_type = tx_steering_type;
7750 
7751 	/* Tx side parameters. */
7752 	if (config->tx_steering_type == TX_PRIORITY_STEERING)
7753 		config->tx_fifo_num = MAX_TX_FIFOS;
7754 	else
7755 		config->tx_fifo_num = tx_fifo_num;
7756 
7757 	/* Initialize the fifos used for tx steering */
7758 	if (config->tx_fifo_num < 5) {
7759 		if (config->tx_fifo_num  == 1)
7760 			sp->total_tcp_fifos = 1;
7761 		else
7762 			sp->total_tcp_fifos = config->tx_fifo_num - 1;
7763 		sp->udp_fifo_idx = config->tx_fifo_num - 1;
7764 		sp->total_udp_fifos = 1;
7765 		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7766 	} else {
7767 		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7768 				       FIFO_OTHER_MAX_NUM);
7769 		sp->udp_fifo_idx = sp->total_tcp_fifos;
7770 		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7771 		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7772 	}
7773 
7774 	config->multiq = dev_multiq;
7775 	for (i = 0; i < config->tx_fifo_num; i++) {
7776 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7777 
7778 		tx_cfg->fifo_len = tx_fifo_len[i];
7779 		tx_cfg->fifo_priority = i;
7780 	}
7781 
7782 	/* mapping the QoS priority to the configured fifos */
7783 	for (i = 0; i < MAX_TX_FIFOS; i++)
7784 		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7785 
7786 	/* map the hashing selector table to the configured fifos */
7787 	for (i = 0; i < config->tx_fifo_num; i++)
7788 		sp->fifo_selector[i] = fifo_selector[i];
7789 
7790 
7791 	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7792 	for (i = 0; i < config->tx_fifo_num; i++) {
7793 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7794 
7795 		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7796 		if (tx_cfg->fifo_len < 65) {
7797 			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7798 			break;
7799 		}
7800 	}
7801 	/* + 2 because one Txd for skb->data and one Txd for UFO */
7802 	config->max_txds = MAX_SKB_FRAGS + 2;
7803 
7804 	/* Rx side parameters. */
7805 	config->rx_ring_num = rx_ring_num;
7806 	for (i = 0; i < config->rx_ring_num; i++) {
7807 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7808 		struct ring_info *ring = &mac_control->rings[i];
7809 
7810 		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7811 		rx_cfg->ring_priority = i;
7812 		ring->rx_bufs_left = 0;
7813 		ring->rxd_mode = sp->rxd_mode;
7814 		ring->rxd_count = rxd_count[sp->rxd_mode];
7815 		ring->pdev = sp->pdev;
7816 		ring->dev = sp->dev;
7817 	}
7818 
7819 	for (i = 0; i < rx_ring_num; i++) {
7820 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7821 
7822 		rx_cfg->ring_org = RING_ORG_BUFF1;
7823 		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7824 	}
7825 
7826 	/*  Setting Mac Control parameters */
7827 	mac_control->rmac_pause_time = rmac_pause_time;
7828 	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7829 	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7830 
7831 
7832 	/*  initialize the shared memory used by the NIC and the host */
7833 	if (init_shared_mem(sp)) {
7834 		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7835 		ret = -ENOMEM;
7836 		goto mem_alloc_failed;
7837 	}
7838 
7839 	sp->bar0 = pci_ioremap_bar(pdev, 0);
7840 	if (!sp->bar0) {
7841 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7842 			  dev->name);
7843 		ret = -ENOMEM;
7844 		goto bar0_remap_failed;
7845 	}
7846 
7847 	sp->bar1 = pci_ioremap_bar(pdev, 2);
7848 	if (!sp->bar1) {
7849 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7850 			  dev->name);
7851 		ret = -ENOMEM;
7852 		goto bar1_remap_failed;
7853 	}
7854 
7855 	/* Initializing the BAR1 address as the start of the FIFO pointer. */
7856 	for (j = 0; j < MAX_TX_FIFOS; j++) {
7857 		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7858 	}
7859 
7860 	/*  Driver entry points */
7861 	dev->netdev_ops = &s2io_netdev_ops;
7862 	dev->ethtool_ops = &netdev_ethtool_ops;
7863 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7864 		NETIF_F_TSO | NETIF_F_TSO6 |
7865 		NETIF_F_RXCSUM | NETIF_F_LRO;
7866 	dev->features |= dev->hw_features |
7867 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
7868 	if (sp->high_dma_flag == true)
7869 		dev->features |= NETIF_F_HIGHDMA;
7870 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7871 	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7872 	INIT_WORK(&sp->set_link_task, s2io_set_link);
7873 
7874 	pci_save_state(sp->pdev);
7875 
7876 	/* Setting swapper control on the NIC, for proper reset operation */
7877 	if (s2io_set_swapper(sp)) {
7878 		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7879 			  dev->name);
7880 		ret = -EAGAIN;
7881 		goto set_swap_failed;
7882 	}
7883 
7884 	/* Verify if the Herc works on the slot its placed into */
7885 	if (sp->device_type & XFRAME_II_DEVICE) {
7886 		mode = s2io_verify_pci_mode(sp);
7887 		if (mode < 0) {
7888 			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7889 				  __func__);
7890 			ret = -EBADSLT;
7891 			goto set_swap_failed;
7892 		}
7893 	}
7894 
7895 	if (sp->config.intr_type == MSI_X) {
7896 		sp->num_entries = config->rx_ring_num + 1;
7897 		ret = s2io_enable_msi_x(sp);
7898 
7899 		if (!ret) {
7900 			ret = s2io_test_msi(sp);
7901 			/* rollback MSI-X, will re-enable during add_isr() */
7902 			remove_msix_isr(sp);
7903 		}
7904 		if (ret) {
7905 
7906 			DBG_PRINT(ERR_DBG,
7907 				  "MSI-X requested but failed to enable\n");
7908 			sp->config.intr_type = INTA;
7909 		}
7910 	}
7911 
7912 	if (config->intr_type ==  MSI_X) {
7913 		for (i = 0; i < config->rx_ring_num ; i++) {
7914 			struct ring_info *ring = &mac_control->rings[i];
7915 
7916 			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7917 		}
7918 	} else {
7919 		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7920 	}
7921 
7922 	/* Not needed for Herc */
7923 	if (sp->device_type & XFRAME_I_DEVICE) {
7924 		/*
7925 		 * Fix for all "FFs" MAC address problems observed on
7926 		 * Alpha platforms
7927 		 */
7928 		fix_mac_address(sp);
7929 		s2io_reset(sp);
7930 	}
7931 
7932 	/*
7933 	 * MAC address initialization.
7934 	 * For now only one mac address will be read and used.
7935 	 */
7936 	bar0 = sp->bar0;
7937 	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7938 		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7939 	writeq(val64, &bar0->rmac_addr_cmd_mem);
7940 	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7941 			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
7942 			      S2IO_BIT_RESET);
7943 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
7944 	mac_down = (u32)tmp64;
7945 	mac_up = (u32) (tmp64 >> 32);
7946 
7947 	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7948 	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7949 	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7950 	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7951 	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7952 	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7953 
7954 	/*  Set the factory defined MAC address initially   */
7955 	dev->addr_len = ETH_ALEN;
7956 	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7957 
7958 	/* initialize number of multicast & unicast MAC entries variables */
7959 	if (sp->device_type == XFRAME_I_DEVICE) {
7960 		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7961 		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7962 		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7963 	} else if (sp->device_type == XFRAME_II_DEVICE) {
7964 		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7965 		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7966 		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7967 	}
7968 
7969 	/* MTU range: 46 - 9600 */
7970 	dev->min_mtu = MIN_MTU;
7971 	dev->max_mtu = S2IO_JUMBO_SIZE;
7972 
7973 	/* store mac addresses from CAM to s2io_nic structure */
7974 	do_s2io_store_unicast_mc(sp);
7975 
7976 	/* Configure MSIX vector for number of rings configured plus one */
7977 	if ((sp->device_type == XFRAME_II_DEVICE) &&
7978 	    (config->intr_type == MSI_X))
7979 		sp->num_entries = config->rx_ring_num + 1;
7980 
7981 	/* Store the values of the MSIX table in the s2io_nic structure */
7982 	store_xmsi_data(sp);
7983 	/* reset Nic and bring it to known state */
7984 	s2io_reset(sp);
7985 
7986 	/*
7987 	 * Initialize link state flags
7988 	 * and the card state parameter
7989 	 */
7990 	sp->state = 0;
7991 
7992 	/* Initialize spinlocks */
7993 	for (i = 0; i < sp->config.tx_fifo_num; i++) {
7994 		struct fifo_info *fifo = &mac_control->fifos[i];
7995 
7996 		spin_lock_init(&fifo->tx_lock);
7997 	}
7998 
7999 	/*
8000 	 * SXE-002: Configure link and activity LED to init state
8001 	 * on driver load.
8002 	 */
8003 	subid = sp->pdev->subsystem_device;
8004 	if ((subid & 0xFF) >= 0x07) {
8005 		val64 = readq(&bar0->gpio_control);
8006 		val64 |= 0x0000800000000000ULL;
8007 		writeq(val64, &bar0->gpio_control);
8008 		val64 = 0x0411040400000000ULL;
8009 		writeq(val64, (void __iomem *)bar0 + 0x2700);
8010 		val64 = readq(&bar0->gpio_control);
8011 	}
8012 
8013 	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
8014 
8015 	if (register_netdev(dev)) {
8016 		DBG_PRINT(ERR_DBG, "Device registration failed\n");
8017 		ret = -ENODEV;
8018 		goto register_failed;
8019 	}
8020 	s2io_vpd_read(sp);
8021 	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8022 	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8023 		  sp->product_name, pdev->revision);
8024 	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8025 		  s2io_driver_version);
8026 	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8027 	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8028 	if (sp->device_type & XFRAME_II_DEVICE) {
8029 		mode = s2io_print_pci_mode(sp);
8030 		if (mode < 0) {
8031 			ret = -EBADSLT;
8032 			unregister_netdev(dev);
8033 			goto set_swap_failed;
8034 		}
8035 	}
8036 	switch (sp->rxd_mode) {
8037 	case RXD_MODE_1:
8038 		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8039 			  dev->name);
8040 		break;
8041 	case RXD_MODE_3B:
8042 		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8043 			  dev->name);
8044 		break;
8045 	}
8046 
8047 	switch (sp->config.napi) {
8048 	case 0:
8049 		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8050 		break;
8051 	case 1:
8052 		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8053 		break;
8054 	}
8055 
8056 	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8057 		  sp->config.tx_fifo_num);
8058 
8059 	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8060 		  sp->config.rx_ring_num);
8061 
8062 	switch (sp->config.intr_type) {
8063 	case INTA:
8064 		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8065 		break;
8066 	case MSI_X:
8067 		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8068 		break;
8069 	}
8070 	if (sp->config.multiq) {
8071 		for (i = 0; i < sp->config.tx_fifo_num; i++) {
8072 			struct fifo_info *fifo = &mac_control->fifos[i];
8073 
8074 			fifo->multiq = config->multiq;
8075 		}
8076 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8077 			  dev->name);
8078 	} else
8079 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8080 			  dev->name);
8081 
8082 	switch (sp->config.tx_steering_type) {
8083 	case NO_STEERING:
8084 		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8085 			  dev->name);
8086 		break;
8087 	case TX_PRIORITY_STEERING:
8088 		DBG_PRINT(ERR_DBG,
8089 			  "%s: Priority steering enabled for transmit\n",
8090 			  dev->name);
8091 		break;
8092 	case TX_DEFAULT_STEERING:
8093 		DBG_PRINT(ERR_DBG,
8094 			  "%s: Default steering enabled for transmit\n",
8095 			  dev->name);
8096 	}
8097 
8098 	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8099 		  dev->name);
8100 	/* Initialize device name */
8101 	snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8102 		 sp->product_name);
8103 
8104 	if (vlan_tag_strip)
8105 		sp->vlan_strip_flag = 1;
8106 	else
8107 		sp->vlan_strip_flag = 0;
8108 
8109 	/*
8110 	 * Make Link state as off at this point, when the Link change
8111 	 * interrupt comes the state will be automatically changed to
8112 	 * the right state.
8113 	 */
8114 	netif_carrier_off(dev);
8115 
8116 	return 0;
8117 
8118 register_failed:
8119 set_swap_failed:
8120 	iounmap(sp->bar1);
8121 bar1_remap_failed:
8122 	iounmap(sp->bar0);
8123 bar0_remap_failed:
8124 mem_alloc_failed:
8125 	free_shared_mem(sp);
8126 	pci_disable_device(pdev);
8127 	pci_release_regions(pdev);
8128 	free_netdev(dev);
8129 
8130 	return ret;
8131 }
8132 
8133 /**
8134  * s2io_rem_nic - Free the PCI device
8135  * @pdev: structure containing the PCI related information of the device.
8136  * Description: This function is called by the Pci subsystem to release a
8137  * PCI device and free up all resource held up by the device. This could
8138  * be in response to a Hot plug event or when the driver is to be removed
8139  * from memory.
8140  */
8141 
8142 static void s2io_rem_nic(struct pci_dev *pdev)
8143 {
8144 	struct net_device *dev = pci_get_drvdata(pdev);
8145 	struct s2io_nic *sp;
8146 
8147 	if (dev == NULL) {
8148 		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8149 		return;
8150 	}
8151 
8152 	sp = netdev_priv(dev);
8153 
8154 	cancel_work_sync(&sp->rst_timer_task);
8155 	cancel_work_sync(&sp->set_link_task);
8156 
8157 	unregister_netdev(dev);
8158 
8159 	free_shared_mem(sp);
8160 	iounmap(sp->bar0);
8161 	iounmap(sp->bar1);
8162 	pci_release_regions(pdev);
8163 	free_netdev(dev);
8164 	pci_disable_device(pdev);
8165 }
8166 
8167 module_pci_driver(s2io_driver);
8168 
8169 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8170 				struct tcphdr **tcp, struct RxD_t *rxdp,
8171 				struct s2io_nic *sp)
8172 {
8173 	int ip_off;
8174 	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8175 
8176 	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8177 		DBG_PRINT(INIT_DBG,
8178 			  "%s: Non-TCP frames not supported for LRO\n",
8179 			  __func__);
8180 		return -1;
8181 	}
8182 
8183 	/* Checking for DIX type or DIX type with VLAN */
8184 	if ((l2_type == 0) || (l2_type == 4)) {
8185 		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8186 		/*
8187 		 * If vlan stripping is disabled and the frame is VLAN tagged,
8188 		 * shift the offset by the VLAN header size bytes.
8189 		 */
8190 		if ((!sp->vlan_strip_flag) &&
8191 		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8192 			ip_off += HEADER_VLAN_SIZE;
8193 	} else {
8194 		/* LLC, SNAP etc are considered non-mergeable */
8195 		return -1;
8196 	}
8197 
8198 	*ip = (struct iphdr *)(buffer + ip_off);
8199 	ip_len = (u8)((*ip)->ihl);
8200 	ip_len <<= 2;
8201 	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8202 
8203 	return 0;
8204 }
8205 
8206 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8207 				  struct tcphdr *tcp)
8208 {
8209 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8210 	if ((lro->iph->saddr != ip->saddr) ||
8211 	    (lro->iph->daddr != ip->daddr) ||
8212 	    (lro->tcph->source != tcp->source) ||
8213 	    (lro->tcph->dest != tcp->dest))
8214 		return -1;
8215 	return 0;
8216 }
8217 
8218 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8219 {
8220 	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8221 }
8222 
8223 static void initiate_new_session(struct lro *lro, u8 *l2h,
8224 				 struct iphdr *ip, struct tcphdr *tcp,
8225 				 u32 tcp_pyld_len, u16 vlan_tag)
8226 {
8227 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8228 	lro->l2h = l2h;
8229 	lro->iph = ip;
8230 	lro->tcph = tcp;
8231 	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8232 	lro->tcp_ack = tcp->ack_seq;
8233 	lro->sg_num = 1;
8234 	lro->total_len = ntohs(ip->tot_len);
8235 	lro->frags_len = 0;
8236 	lro->vlan_tag = vlan_tag;
8237 	/*
8238 	 * Check if we saw TCP timestamp.
8239 	 * Other consistency checks have already been done.
8240 	 */
8241 	if (tcp->doff == 8) {
8242 		__be32 *ptr;
8243 		ptr = (__be32 *)(tcp+1);
8244 		lro->saw_ts = 1;
8245 		lro->cur_tsval = ntohl(*(ptr+1));
8246 		lro->cur_tsecr = *(ptr+2);
8247 	}
8248 	lro->in_use = 1;
8249 }
8250 
8251 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8252 {
8253 	struct iphdr *ip = lro->iph;
8254 	struct tcphdr *tcp = lro->tcph;
8255 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8256 
8257 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8258 
8259 	/* Update L3 header */
8260 	csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8261 	ip->tot_len = htons(lro->total_len);
8262 
8263 	/* Update L4 header */
8264 	tcp->ack_seq = lro->tcp_ack;
8265 	tcp->window = lro->window;
8266 
8267 	/* Update tsecr field if this session has timestamps enabled */
8268 	if (lro->saw_ts) {
8269 		__be32 *ptr = (__be32 *)(tcp + 1);
8270 		*(ptr+2) = lro->cur_tsecr;
8271 	}
8272 
8273 	/* Update counters required for calculation of
8274 	 * average no. of packets aggregated.
8275 	 */
8276 	swstats->sum_avg_pkts_aggregated += lro->sg_num;
8277 	swstats->num_aggregations++;
8278 }
8279 
8280 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8281 			     struct tcphdr *tcp, u32 l4_pyld)
8282 {
8283 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8284 	lro->total_len += l4_pyld;
8285 	lro->frags_len += l4_pyld;
8286 	lro->tcp_next_seq += l4_pyld;
8287 	lro->sg_num++;
8288 
8289 	/* Update ack seq no. and window ad(from this pkt) in LRO object */
8290 	lro->tcp_ack = tcp->ack_seq;
8291 	lro->window = tcp->window;
8292 
8293 	if (lro->saw_ts) {
8294 		__be32 *ptr;
8295 		/* Update tsecr and tsval from this packet */
8296 		ptr = (__be32 *)(tcp+1);
8297 		lro->cur_tsval = ntohl(*(ptr+1));
8298 		lro->cur_tsecr = *(ptr + 2);
8299 	}
8300 }
8301 
8302 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8303 				    struct tcphdr *tcp, u32 tcp_pyld_len)
8304 {
8305 	u8 *ptr;
8306 
8307 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8308 
8309 	if (!tcp_pyld_len) {
8310 		/* Runt frame or a pure ack */
8311 		return -1;
8312 	}
8313 
8314 	if (ip->ihl != 5) /* IP has options */
8315 		return -1;
8316 
8317 	/* If we see CE codepoint in IP header, packet is not mergeable */
8318 	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8319 		return -1;
8320 
8321 	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8322 	if (tcp->urg || tcp->psh || tcp->rst ||
8323 	    tcp->syn || tcp->fin ||
8324 	    tcp->ece || tcp->cwr || !tcp->ack) {
8325 		/*
8326 		 * Currently recognize only the ack control word and
8327 		 * any other control field being set would result in
8328 		 * flushing the LRO session
8329 		 */
8330 		return -1;
8331 	}
8332 
8333 	/*
8334 	 * Allow only one TCP timestamp option. Don't aggregate if
8335 	 * any other options are detected.
8336 	 */
8337 	if (tcp->doff != 5 && tcp->doff != 8)
8338 		return -1;
8339 
8340 	if (tcp->doff == 8) {
8341 		ptr = (u8 *)(tcp + 1);
8342 		while (*ptr == TCPOPT_NOP)
8343 			ptr++;
8344 		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8345 			return -1;
8346 
8347 		/* Ensure timestamp value increases monotonically */
8348 		if (l_lro)
8349 			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8350 				return -1;
8351 
8352 		/* timestamp echo reply should be non-zero */
8353 		if (*((__be32 *)(ptr+6)) == 0)
8354 			return -1;
8355 	}
8356 
8357 	return 0;
8358 }
8359 
8360 static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
8361 				 u8 **tcp, u32 *tcp_len, struct lro **lro,
8362 				 struct RxD_t *rxdp, struct s2io_nic *sp)
8363 {
8364 	struct iphdr *ip;
8365 	struct tcphdr *tcph;
8366 	int ret = 0, i;
8367 	u16 vlan_tag = 0;
8368 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8369 
8370 	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8371 				   rxdp, sp);
8372 	if (ret)
8373 		return ret;
8374 
8375 	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
8376 
8377 	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8378 	tcph = (struct tcphdr *)*tcp;
8379 	*tcp_len = get_l4_pyld_length(ip, tcph);
8380 	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8381 		struct lro *l_lro = &ring_data->lro0_n[i];
8382 		if (l_lro->in_use) {
8383 			if (check_for_socket_match(l_lro, ip, tcph))
8384 				continue;
8385 			/* Sock pair matched */
8386 			*lro = l_lro;
8387 
8388 			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8389 				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
8390 					  "expected 0x%x, actual 0x%x\n",
8391 					  __func__,
8392 					  (*lro)->tcp_next_seq,
8393 					  ntohl(tcph->seq));
8394 
8395 				swstats->outof_sequence_pkts++;
8396 				ret = 2;
8397 				break;
8398 			}
8399 
8400 			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8401 						      *tcp_len))
8402 				ret = 1; /* Aggregate */
8403 			else
8404 				ret = 2; /* Flush both */
8405 			break;
8406 		}
8407 	}
8408 
8409 	if (ret == 0) {
8410 		/* Before searching for available LRO objects,
8411 		 * check if the pkt is L3/L4 aggregatable. If not
8412 		 * don't create new LRO session. Just send this
8413 		 * packet up.
8414 		 */
8415 		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
8416 			return 5;
8417 
8418 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8419 			struct lro *l_lro = &ring_data->lro0_n[i];
8420 			if (!(l_lro->in_use)) {
8421 				*lro = l_lro;
8422 				ret = 3; /* Begin anew */
8423 				break;
8424 			}
8425 		}
8426 	}
8427 
8428 	if (ret == 0) { /* sessions exceeded */
8429 		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8430 			  __func__);
8431 		*lro = NULL;
8432 		return ret;
8433 	}
8434 
8435 	switch (ret) {
8436 	case 3:
8437 		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8438 				     vlan_tag);
8439 		break;
8440 	case 2:
8441 		update_L3L4_header(sp, *lro);
8442 		break;
8443 	case 1:
8444 		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8445 		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8446 			update_L3L4_header(sp, *lro);
8447 			ret = 4; /* Flush the LRO */
8448 		}
8449 		break;
8450 	default:
8451 		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
8452 		break;
8453 	}
8454 
8455 	return ret;
8456 }
8457 
8458 static void clear_lro_session(struct lro *lro)
8459 {
8460 	static u16 lro_struct_size = sizeof(struct lro);
8461 
8462 	memset(lro, 0, lro_struct_size);
8463 }
8464 
8465 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8466 {
8467 	struct net_device *dev = skb->dev;
8468 	struct s2io_nic *sp = netdev_priv(dev);
8469 
8470 	skb->protocol = eth_type_trans(skb, dev);
8471 	if (vlan_tag && sp->vlan_strip_flag)
8472 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8473 	if (sp->config.napi)
8474 		netif_receive_skb(skb);
8475 	else
8476 		netif_rx(skb);
8477 }
8478 
8479 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8480 			   struct sk_buff *skb, u32 tcp_len)
8481 {
8482 	struct sk_buff *first = lro->parent;
8483 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8484 
8485 	first->len += tcp_len;
8486 	first->data_len = lro->frags_len;
8487 	skb_pull(skb, (skb->len - tcp_len));
8488 	if (skb_shinfo(first)->frag_list)
8489 		lro->last_frag->next = skb;
8490 	else
8491 		skb_shinfo(first)->frag_list = skb;
8492 	first->truesize += skb->truesize;
8493 	lro->last_frag = skb;
8494 	swstats->clubbed_frms_cnt++;
8495 }
8496 
8497 /**
8498  * s2io_io_error_detected - called when PCI error is detected
8499  * @pdev: Pointer to PCI device
8500  * @state: The current pci connection state
8501  *
8502  * This function is called after a PCI bus error affecting
8503  * this device has been detected.
8504  */
8505 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8506 					       pci_channel_state_t state)
8507 {
8508 	struct net_device *netdev = pci_get_drvdata(pdev);
8509 	struct s2io_nic *sp = netdev_priv(netdev);
8510 
8511 	netif_device_detach(netdev);
8512 
8513 	if (state == pci_channel_io_perm_failure)
8514 		return PCI_ERS_RESULT_DISCONNECT;
8515 
8516 	if (netif_running(netdev)) {
8517 		/* Bring down the card, while avoiding PCI I/O */
8518 		do_s2io_card_down(sp, 0);
8519 	}
8520 	pci_disable_device(pdev);
8521 
8522 	return PCI_ERS_RESULT_NEED_RESET;
8523 }
8524 
8525 /**
8526  * s2io_io_slot_reset - called after the pci bus has been reset.
8527  * @pdev: Pointer to PCI device
8528  *
8529  * Restart the card from scratch, as if from a cold-boot.
8530  * At this point, the card has exprienced a hard reset,
8531  * followed by fixups by BIOS, and has its config space
8532  * set up identically to what it was at cold boot.
8533  */
8534 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8535 {
8536 	struct net_device *netdev = pci_get_drvdata(pdev);
8537 	struct s2io_nic *sp = netdev_priv(netdev);
8538 
8539 	if (pci_enable_device(pdev)) {
8540 		pr_err("Cannot re-enable PCI device after reset.\n");
8541 		return PCI_ERS_RESULT_DISCONNECT;
8542 	}
8543 
8544 	pci_set_master(pdev);
8545 	s2io_reset(sp);
8546 
8547 	return PCI_ERS_RESULT_RECOVERED;
8548 }
8549 
8550 /**
8551  * s2io_io_resume - called when traffic can start flowing again.
8552  * @pdev: Pointer to PCI device
8553  *
8554  * This callback is called when the error recovery driver tells
8555  * us that its OK to resume normal operation.
8556  */
8557 static void s2io_io_resume(struct pci_dev *pdev)
8558 {
8559 	struct net_device *netdev = pci_get_drvdata(pdev);
8560 	struct s2io_nic *sp = netdev_priv(netdev);
8561 
8562 	if (netif_running(netdev)) {
8563 		if (s2io_card_up(sp)) {
8564 			pr_err("Can't bring device back up after reset.\n");
8565 			return;
8566 		}
8567 
8568 		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8569 			s2io_card_down(sp);
8570 			pr_err("Can't restore mac addr after reset.\n");
8571 			return;
8572 		}
8573 	}
8574 
8575 	netif_device_attach(netdev);
8576 	netif_tx_wake_all_queues(netdev);
8577 }
8578