1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2010 Exar Corp.
4  *
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik		: For pointing out the improper error condition
15  *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watchdog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
19  * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
20  *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all the code parts that were
 *			  deprecated and also for styling-related comments.
23  * Grant Grundler	: For helping me get rid of some Architecture
24  *			  dependent code.
25  * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
 * rx_ring_num: This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *		values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
 * lro_max_pkts: This parameter defines the maximum number of packets that can
 *     be aggregated as a single large packet
 * napi: This parameter is used to enable/disable NAPI (polling Rx)
 *     Possible values '1' for enable and '0' for disable. Default is '1'
45  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
46  *                 Possible values '1' for enable , '0' for disable.
47  *                 Default is '2' - which means disable in promisc mode
48  *                 and enable in non-promiscuous mode.
 * multiq: This parameter is used to enable/disable MULTIQUEUE support.
50  *      Possible values '1' for enable and '0' for disable. Default is '0'
51  ************************************************************************/
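/*
 * Illustrative invocation (a sketch only; parameter names as documented
 * above, values chosen for the example):
 *
 *   modprobe s2io tx_fifo_num=4 rx_ring_num=2 intr_type=2 napi=1
 *
 * loads the driver with four Tx FIFOs, two Rx rings, MSI-X interrupts
 * and NAPI enabled.
 */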
52 
53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
54 
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/mdio.h>
65 #include <linux/skbuff.h>
66 #include <linux/init.h>
67 #include <linux/delay.h>
68 #include <linux/stddef.h>
69 #include <linux/ioctl.h>
70 #include <linux/timex.h>
71 #include <linux/ethtool.h>
72 #include <linux/workqueue.h>
73 #include <linux/if_vlan.h>
74 #include <linux/ip.h>
75 #include <linux/tcp.h>
76 #include <linux/uaccess.h>
77 #include <linux/io.h>
78 #include <linux/io-64-nonatomic-lo-hi.h>
79 #include <linux/slab.h>
80 #include <linux/prefetch.h>
81 #include <net/tcp.h>
82 #include <net/checksum.h>
83 
84 #include <asm/div64.h>
85 #include <asm/irq.h>
86 
/* local includes */
88 #include "s2io.h"
89 #include "s2io-regs.h"
90 
91 #define DRV_VERSION "2.0.26.28"
92 
93 /* S2io Driver name & version. */
94 static const char s2io_driver_name[] = "Neterion";
95 static const char s2io_driver_version[] = DRV_VERSION;
96 
97 static const int rxd_size[2] = {32, 48};
98 static const int rxd_count[2] = {127, 85};
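/*
 * rxd_size/rxd_count are indexed by the RxD mode (assuming RXD_MODE_1
 * maps to index 0 and RXD_MODE_3B to index 1, as the indexing below
 * suggests): 1-buffer mode uses 32-byte RxDs, 127 per block; 2-buffer
 * (3B) mode uses 48-byte RxDs, 85 per block.
 */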
99 
100 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
101 {
102 	int ret;
103 
104 	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
105 	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
106 
107 	return ret;
108 }
109 
110 /*
 * Cards with the following subsystem_ids have a link state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
114  */
115 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
116 	(dev_type == XFRAME_I_DEVICE) ?					\
117 	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
118 	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
119 
120 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
121 				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
122 
123 static inline int is_s2io_card_up(const struct s2io_nic *sp)
124 {
125 	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
126 }
127 
128 /* Ethtool related variables and Macros. */
129 static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
130 	"Register test\t(offline)",
131 	"Eeprom test\t(offline)",
132 	"Link test\t(online)",
133 	"RLDRAM test\t(offline)",
134 	"BIST Test\t(offline)"
135 };
136 
137 static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
138 	{"tmac_frms"},
139 	{"tmac_data_octets"},
140 	{"tmac_drop_frms"},
141 	{"tmac_mcst_frms"},
142 	{"tmac_bcst_frms"},
143 	{"tmac_pause_ctrl_frms"},
144 	{"tmac_ttl_octets"},
145 	{"tmac_ucst_frms"},
146 	{"tmac_nucst_frms"},
147 	{"tmac_any_err_frms"},
148 	{"tmac_ttl_less_fb_octets"},
149 	{"tmac_vld_ip_octets"},
150 	{"tmac_vld_ip"},
151 	{"tmac_drop_ip"},
152 	{"tmac_icmp"},
153 	{"tmac_rst_tcp"},
154 	{"tmac_tcp"},
155 	{"tmac_udp"},
156 	{"rmac_vld_frms"},
157 	{"rmac_data_octets"},
158 	{"rmac_fcs_err_frms"},
159 	{"rmac_drop_frms"},
160 	{"rmac_vld_mcst_frms"},
161 	{"rmac_vld_bcst_frms"},
162 	{"rmac_in_rng_len_err_frms"},
163 	{"rmac_out_rng_len_err_frms"},
164 	{"rmac_long_frms"},
165 	{"rmac_pause_ctrl_frms"},
166 	{"rmac_unsup_ctrl_frms"},
167 	{"rmac_ttl_octets"},
168 	{"rmac_accepted_ucst_frms"},
169 	{"rmac_accepted_nucst_frms"},
170 	{"rmac_discarded_frms"},
171 	{"rmac_drop_events"},
172 	{"rmac_ttl_less_fb_octets"},
173 	{"rmac_ttl_frms"},
174 	{"rmac_usized_frms"},
175 	{"rmac_osized_frms"},
176 	{"rmac_frag_frms"},
177 	{"rmac_jabber_frms"},
178 	{"rmac_ttl_64_frms"},
179 	{"rmac_ttl_65_127_frms"},
180 	{"rmac_ttl_128_255_frms"},
181 	{"rmac_ttl_256_511_frms"},
182 	{"rmac_ttl_512_1023_frms"},
183 	{"rmac_ttl_1024_1518_frms"},
184 	{"rmac_ip"},
185 	{"rmac_ip_octets"},
186 	{"rmac_hdr_err_ip"},
187 	{"rmac_drop_ip"},
188 	{"rmac_icmp"},
189 	{"rmac_tcp"},
190 	{"rmac_udp"},
191 	{"rmac_err_drp_udp"},
192 	{"rmac_xgmii_err_sym"},
193 	{"rmac_frms_q0"},
194 	{"rmac_frms_q1"},
195 	{"rmac_frms_q2"},
196 	{"rmac_frms_q3"},
197 	{"rmac_frms_q4"},
198 	{"rmac_frms_q5"},
199 	{"rmac_frms_q6"},
200 	{"rmac_frms_q7"},
201 	{"rmac_full_q0"},
202 	{"rmac_full_q1"},
203 	{"rmac_full_q2"},
204 	{"rmac_full_q3"},
205 	{"rmac_full_q4"},
206 	{"rmac_full_q5"},
207 	{"rmac_full_q6"},
208 	{"rmac_full_q7"},
209 	{"rmac_pause_cnt"},
210 	{"rmac_xgmii_data_err_cnt"},
211 	{"rmac_xgmii_ctrl_err_cnt"},
212 	{"rmac_accepted_ip"},
213 	{"rmac_err_tcp"},
214 	{"rd_req_cnt"},
215 	{"new_rd_req_cnt"},
216 	{"new_rd_req_rtry_cnt"},
217 	{"rd_rtry_cnt"},
218 	{"wr_rtry_rd_ack_cnt"},
219 	{"wr_req_cnt"},
220 	{"new_wr_req_cnt"},
221 	{"new_wr_req_rtry_cnt"},
222 	{"wr_rtry_cnt"},
223 	{"wr_disc_cnt"},
224 	{"rd_rtry_wr_ack_cnt"},
225 	{"txp_wr_cnt"},
226 	{"txd_rd_cnt"},
227 	{"txd_wr_cnt"},
228 	{"rxd_rd_cnt"},
229 	{"rxd_wr_cnt"},
230 	{"txf_rd_cnt"},
231 	{"rxf_wr_cnt"}
232 };
233 
234 static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
235 	{"rmac_ttl_1519_4095_frms"},
236 	{"rmac_ttl_4096_8191_frms"},
237 	{"rmac_ttl_8192_max_frms"},
238 	{"rmac_ttl_gt_max_frms"},
239 	{"rmac_osized_alt_frms"},
240 	{"rmac_jabber_alt_frms"},
241 	{"rmac_gt_max_alt_frms"},
242 	{"rmac_vlan_frms"},
243 	{"rmac_len_discard"},
244 	{"rmac_fcs_discard"},
245 	{"rmac_pf_discard"},
246 	{"rmac_da_discard"},
247 	{"rmac_red_discard"},
248 	{"rmac_rts_discard"},
249 	{"rmac_ingm_full_discard"},
250 	{"link_fault_cnt"}
251 };
252 
253 static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
254 	{"\n DRIVER STATISTICS"},
255 	{"single_bit_ecc_errs"},
256 	{"double_bit_ecc_errs"},
257 	{"parity_err_cnt"},
258 	{"serious_err_cnt"},
259 	{"soft_reset_cnt"},
260 	{"fifo_full_cnt"},
261 	{"ring_0_full_cnt"},
262 	{"ring_1_full_cnt"},
263 	{"ring_2_full_cnt"},
264 	{"ring_3_full_cnt"},
265 	{"ring_4_full_cnt"},
266 	{"ring_5_full_cnt"},
267 	{"ring_6_full_cnt"},
268 	{"ring_7_full_cnt"},
269 	{"alarm_transceiver_temp_high"},
270 	{"alarm_transceiver_temp_low"},
271 	{"alarm_laser_bias_current_high"},
272 	{"alarm_laser_bias_current_low"},
273 	{"alarm_laser_output_power_high"},
274 	{"alarm_laser_output_power_low"},
275 	{"warn_transceiver_temp_high"},
276 	{"warn_transceiver_temp_low"},
277 	{"warn_laser_bias_current_high"},
278 	{"warn_laser_bias_current_low"},
279 	{"warn_laser_output_power_high"},
280 	{"warn_laser_output_power_low"},
281 	{"lro_aggregated_pkts"},
282 	{"lro_flush_both_count"},
283 	{"lro_out_of_sequence_pkts"},
284 	{"lro_flush_due_to_max_pkts"},
285 	{"lro_avg_aggr_pkts"},
286 	{"mem_alloc_fail_cnt"},
287 	{"pci_map_fail_cnt"},
288 	{"watchdog_timer_cnt"},
289 	{"mem_allocated"},
290 	{"mem_freed"},
291 	{"link_up_cnt"},
292 	{"link_down_cnt"},
293 	{"link_up_time"},
294 	{"link_down_time"},
295 	{"tx_tcode_buf_abort_cnt"},
296 	{"tx_tcode_desc_abort_cnt"},
297 	{"tx_tcode_parity_err_cnt"},
298 	{"tx_tcode_link_loss_cnt"},
299 	{"tx_tcode_list_proc_err_cnt"},
300 	{"rx_tcode_parity_err_cnt"},
301 	{"rx_tcode_abort_cnt"},
302 	{"rx_tcode_parity_abort_cnt"},
303 	{"rx_tcode_rda_fail_cnt"},
304 	{"rx_tcode_unkn_prot_cnt"},
305 	{"rx_tcode_fcs_err_cnt"},
306 	{"rx_tcode_buf_size_err_cnt"},
307 	{"rx_tcode_rxd_corrupt_cnt"},
308 	{"rx_tcode_unkn_err_cnt"},
309 	{"tda_err_cnt"},
310 	{"pfc_err_cnt"},
311 	{"pcc_err_cnt"},
312 	{"tti_err_cnt"},
313 	{"tpa_err_cnt"},
314 	{"sm_err_cnt"},
315 	{"lso_err_cnt"},
316 	{"mac_tmac_err_cnt"},
317 	{"mac_rmac_err_cnt"},
318 	{"xgxs_txgxs_err_cnt"},
319 	{"xgxs_rxgxs_err_cnt"},
320 	{"rc_err_cnt"},
321 	{"prc_pcix_err_cnt"},
322 	{"rpa_err_cnt"},
323 	{"rda_err_cnt"},
324 	{"rti_err_cnt"},
325 	{"mc_err_cnt"}
326 };
327 
328 #define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
329 #define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
330 #define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)
331 
332 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
333 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
334 
335 #define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
336 #define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
337 
338 #define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
339 #define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
340 
341 /* copy mac addr to def_mac_addr array */
342 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
343 {
344 	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
345 	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
346 	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
347 	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
348 	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
349 	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
350 }
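/*
 * Illustrative example: mac_addr = 0x001122334455ULL is stored as
 * 00:11:22:33:44:55, i.e. def_mac_addr[offset].mac_addr[0] holds the
 * most significant byte of the 48-bit address.
 */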
351 
352 /*
 * Constants to be programmed into the Xena's registers to configure
354  * the XAUI.
355  */
356 
357 #define	END_SIGN	0x0
358 static const u64 herc_act_dtx_cfg[] = {
359 	/* Set address */
360 	0x8000051536750000ULL, 0x80000515367500E0ULL,
361 	/* Write data */
362 	0x8000051536750004ULL, 0x80000515367500E4ULL,
363 	/* Set address */
364 	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
365 	/* Write data */
366 	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
367 	/* Set address */
368 	0x801205150D440000ULL, 0x801205150D4400E0ULL,
369 	/* Write data */
370 	0x801205150D440004ULL, 0x801205150D4400E4ULL,
371 	/* Set address */
372 	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
373 	/* Write data */
374 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
375 	/* Done */
376 	END_SIGN
377 };
378 
379 static const u64 xena_dtx_cfg[] = {
380 	/* Set address */
381 	0x8000051500000000ULL, 0x80000515000000E0ULL,
382 	/* Write data */
383 	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
384 	/* Set address */
385 	0x8001051500000000ULL, 0x80010515000000E0ULL,
386 	/* Write data */
387 	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
388 	/* Set address */
389 	0x8002051500000000ULL, 0x80020515000000E0ULL,
390 	/* Write data */
391 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
392 	END_SIGN
393 };
394 
395 /*
 * Constants for fixing the MAC address problem seen mostly on
397  * Alpha machines.
398  */
399 static const u64 fix_mac[] = {
400 	0x0060000000000000ULL, 0x0060600000000000ULL,
401 	0x0040600000000000ULL, 0x0000600000000000ULL,
402 	0x0020600000000000ULL, 0x0060600000000000ULL,
403 	0x0020600000000000ULL, 0x0060600000000000ULL,
404 	0x0020600000000000ULL, 0x0060600000000000ULL,
405 	0x0020600000000000ULL, 0x0060600000000000ULL,
406 	0x0020600000000000ULL, 0x0060600000000000ULL,
407 	0x0020600000000000ULL, 0x0060600000000000ULL,
408 	0x0020600000000000ULL, 0x0060600000000000ULL,
409 	0x0020600000000000ULL, 0x0060600000000000ULL,
410 	0x0020600000000000ULL, 0x0060600000000000ULL,
411 	0x0020600000000000ULL, 0x0060600000000000ULL,
412 	0x0020600000000000ULL, 0x0000600000000000ULL,
413 	0x0040600000000000ULL, 0x0060600000000000ULL,
414 	END_SIGN
415 };
416 
417 MODULE_LICENSE("GPL");
418 MODULE_VERSION(DRV_VERSION);
419 
420 
421 /* Module Loadable parameters. */
422 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
423 S2IO_PARM_INT(rx_ring_num, 1);
424 S2IO_PARM_INT(multiq, 0);
425 S2IO_PARM_INT(rx_ring_mode, 1);
426 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
427 S2IO_PARM_INT(rmac_pause_time, 0x100);
428 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
429 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
430 S2IO_PARM_INT(shared_splits, 0);
431 S2IO_PARM_INT(tmac_util_period, 5);
432 S2IO_PARM_INT(rmac_util_period, 5);
433 S2IO_PARM_INT(l3l4hdr_size, 128);
434 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
435 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
436 /* Frequency of Rx desc syncs expressed as power of 2 */
437 S2IO_PARM_INT(rxsync_frequency, 3);
438 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
439 S2IO_PARM_INT(intr_type, 2);
440 /* Large receive offload feature */
441 
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit the max IP pkt size (64K)
 */
445 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
446 S2IO_PARM_INT(indicate_max_pkts, 0);
447 
448 S2IO_PARM_INT(napi, 1);
449 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
450 
451 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
452 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
453 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
454 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
455 static unsigned int rts_frm_len[MAX_RX_RINGS] =
456 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
457 
458 module_param_array(tx_fifo_len, uint, NULL, 0);
459 module_param_array(rx_ring_sz, uint, NULL, 0);
460 module_param_array(rts_frm_len, uint, NULL, 0);
461 
462 /*
463  * S2IO device table.
464  * This table lists all the devices that this driver supports.
465  */
466 static const struct pci_device_id s2io_tbl[] = {
467 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
468 	 PCI_ANY_ID, PCI_ANY_ID},
469 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
470 	 PCI_ANY_ID, PCI_ANY_ID},
471 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
472 	 PCI_ANY_ID, PCI_ANY_ID},
473 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
474 	 PCI_ANY_ID, PCI_ANY_ID},
475 	{0,}
476 };
477 
478 MODULE_DEVICE_TABLE(pci, s2io_tbl);
479 
480 static const struct pci_error_handlers s2io_err_handler = {
481 	.error_detected = s2io_io_error_detected,
482 	.slot_reset = s2io_io_slot_reset,
483 	.resume = s2io_io_resume,
484 };
485 
486 static struct pci_driver s2io_driver = {
487 	.name = "S2IO",
488 	.id_table = s2io_tbl,
489 	.probe = s2io_init_nic,
490 	.remove = s2io_rem_nic,
491 	.err_handler = &s2io_err_handler,
492 };
493 
/* A helper macro used by both the init and free shared_mem functions. */
495 #define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
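/*
 * Illustrative example: TXD_MEM_PAGE_CNT(512, 8) == 64, i.e. a
 * 512-entry FIFO with 8 TxD lists per page needs 64 pages.
 */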
496 
497 /* netqueue manipulation helper functions */
498 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
499 {
500 	if (!sp->config.multiq) {
501 		int i;
502 
503 		for (i = 0; i < sp->config.tx_fifo_num; i++)
504 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
505 	}
506 	netif_tx_stop_all_queues(sp->dev);
507 }
508 
509 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
510 {
511 	if (!sp->config.multiq)
512 		sp->mac_control.fifos[fifo_no].queue_state =
513 			FIFO_QUEUE_STOP;
514 
515 	netif_tx_stop_all_queues(sp->dev);
516 }
517 
518 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
519 {
520 	if (!sp->config.multiq) {
521 		int i;
522 
523 		for (i = 0; i < sp->config.tx_fifo_num; i++)
524 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
525 	}
526 	netif_tx_start_all_queues(sp->dev);
527 }
528 
529 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
530 {
531 	if (!sp->config.multiq) {
532 		int i;
533 
534 		for (i = 0; i < sp->config.tx_fifo_num; i++)
535 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
536 	}
537 	netif_tx_wake_all_queues(sp->dev);
538 }
539 
540 static inline void s2io_wake_tx_queue(
541 	struct fifo_info *fifo, int cnt, u8 multiq)
542 {
544 	if (multiq) {
545 		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
546 			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
547 	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
548 		if (netif_queue_stopped(fifo->dev)) {
549 			fifo->queue_state = FIFO_QUEUE_START;
550 			netif_wake_queue(fifo->dev);
551 		}
552 	}
553 }
554 
555 /**
556  * init_shared_mem - Allocation and Initialization of Memory
557  * @nic: Device private variable.
558  * Description: The function allocates all the memory areas shared
559  * between the NIC and the driver. This includes Tx descriptors,
560  * Rx descriptors and the statistics block.
561  */
562 
563 static int init_shared_mem(struct s2io_nic *nic)
564 {
565 	u32 size;
566 	void *tmp_v_addr, *tmp_v_addr_next;
567 	dma_addr_t tmp_p_addr, tmp_p_addr_next;
568 	struct RxD_block *pre_rxd_blk = NULL;
569 	int i, j, blk_cnt;
570 	int lst_size, lst_per_page;
571 	struct net_device *dev = nic->dev;
572 	unsigned long tmp;
573 	struct buffAdd *ba;
574 	struct config_param *config = &nic->config;
575 	struct mac_info *mac_control = &nic->mac_control;
576 	unsigned long long mem_allocated = 0;
577 
578 	/* Allocation and initialization of TXDLs in FIFOs */
579 	size = 0;
580 	for (i = 0; i < config->tx_fifo_num; i++) {
581 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
582 
583 		size += tx_cfg->fifo_len;
584 	}
585 	if (size > MAX_AVAILABLE_TXDS) {
586 		DBG_PRINT(ERR_DBG,
587 			  "Too many TxDs requested: %d, max supported: %d\n",
588 			  size, MAX_AVAILABLE_TXDS);
589 		return -EINVAL;
590 	}
591 
592 	size = 0;
593 	for (i = 0; i < config->tx_fifo_num; i++) {
594 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
595 
596 		size = tx_cfg->fifo_len;
		/* Legal values are from 2 to 8192 */
600 		if (size < 2) {
601 			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
602 				  "Valid lengths are 2 through 8192\n",
603 				  i, size);
604 			return -EINVAL;
605 		}
606 	}
607 
608 	lst_size = (sizeof(struct TxD) * config->max_txds);
609 	lst_per_page = PAGE_SIZE / lst_size;
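	/*
	 * Illustrative arithmetic (assumed sizes): if sizeof(struct TxD)
	 * were 32 and max_txds 16, lst_size would be 512 and a 4K page
	 * would hold 8 TxD lists.
	 */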
610 
611 	for (i = 0; i < config->tx_fifo_num; i++) {
612 		struct fifo_info *fifo = &mac_control->fifos[i];
613 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
614 		int fifo_len = tx_cfg->fifo_len;
615 		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
616 
617 		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
618 		if (!fifo->list_info) {
619 			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
620 			return -ENOMEM;
621 		}
622 		mem_allocated += list_holder_size;
623 	}
624 	for (i = 0; i < config->tx_fifo_num; i++) {
625 		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
626 						lst_per_page);
627 		struct fifo_info *fifo = &mac_control->fifos[i];
628 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
629 
630 		fifo->tx_curr_put_info.offset = 0;
631 		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
632 		fifo->tx_curr_get_info.offset = 0;
633 		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
634 		fifo->fifo_no = i;
635 		fifo->nic = nic;
636 		fifo->max_txds = MAX_SKB_FRAGS + 2;
637 		fifo->dev = dev;
638 
639 		for (j = 0; j < page_num; j++) {
640 			int k = 0;
641 			dma_addr_t tmp_p;
642 			void *tmp_v;
643 			tmp_v = pci_alloc_consistent(nic->pdev,
644 						     PAGE_SIZE, &tmp_p);
645 			if (!tmp_v) {
646 				DBG_PRINT(INFO_DBG,
647 					  "pci_alloc_consistent failed for TxDL\n");
648 				return -ENOMEM;
649 			}
			/* If we got a zero DMA address (can happen on
			 * certain platforms like PPC), reallocate.
			 * Store the virtual address of the page we don't
			 * want, so it can be freed later.
			 */
655 			if (!tmp_p) {
656 				mac_control->zerodma_virt_addr = tmp_v;
657 				DBG_PRINT(INIT_DBG,
658 					  "%s: Zero DMA address for TxDL. "
659 					  "Virtual address %p\n",
660 					  dev->name, tmp_v);
661 				tmp_v = pci_alloc_consistent(nic->pdev,
662 							     PAGE_SIZE, &tmp_p);
663 				if (!tmp_v) {
664 					DBG_PRINT(INFO_DBG,
665 						  "pci_alloc_consistent failed for TxDL\n");
666 					return -ENOMEM;
667 				}
668 				mem_allocated += PAGE_SIZE;
669 			}
670 			while (k < lst_per_page) {
671 				int l = (j * lst_per_page) + k;
672 				if (l == tx_cfg->fifo_len)
673 					break;
674 				fifo->list_info[l].list_virt_addr =
675 					tmp_v + (k * lst_size);
676 				fifo->list_info[l].list_phy_addr =
677 					tmp_p + (k * lst_size);
678 				k++;
679 			}
680 		}
681 	}
682 
683 	for (i = 0; i < config->tx_fifo_num; i++) {
684 		struct fifo_info *fifo = &mac_control->fifos[i];
685 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
686 
687 		size = tx_cfg->fifo_len;
688 		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
689 		if (!fifo->ufo_in_band_v)
690 			return -ENOMEM;
691 		mem_allocated += (size * sizeof(u64));
692 	}
693 
694 	/* Allocation and initialization of RXDs in Rings */
695 	size = 0;
696 	for (i = 0; i < config->rx_ring_num; i++) {
697 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
698 		struct ring_info *ring = &mac_control->rings[i];
699 
700 		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
701 			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
702 				  "multiple of RxDs per Block\n",
703 				  dev->name, i);
704 			return FAILURE;
705 		}
706 		size += rx_cfg->num_rxd;
707 		ring->block_count = rx_cfg->num_rxd /
708 			(rxd_count[nic->rxd_mode] + 1);
709 		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
710 	}
711 	if (nic->rxd_mode == RXD_MODE_1)
712 		size = (size * (sizeof(struct RxD1)));
713 	else
714 		size = (size * (sizeof(struct RxD3)));
715 
716 	for (i = 0; i < config->rx_ring_num; i++) {
717 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
718 		struct ring_info *ring = &mac_control->rings[i];
719 
720 		ring->rx_curr_get_info.block_index = 0;
721 		ring->rx_curr_get_info.offset = 0;
722 		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
723 		ring->rx_curr_put_info.block_index = 0;
724 		ring->rx_curr_put_info.offset = 0;
725 		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
726 		ring->nic = nic;
727 		ring->ring_no = i;
728 
729 		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
730 		/*  Allocating all the Rx blocks */
731 		for (j = 0; j < blk_cnt; j++) {
732 			struct rx_block_info *rx_blocks;
733 			int l;
734 
735 			rx_blocks = &ring->rx_blocks[j];
736 			size = SIZE_OF_BLOCK;	/* size is always page size */
737 			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
738 							  &tmp_p_addr);
739 			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was allocated up to the
				 * point of failure.
				 */
746 				rx_blocks->block_virt_addr = tmp_v_addr;
747 				return -ENOMEM;
748 			}
749 			mem_allocated += size;
750 			memset(tmp_v_addr, 0, size);
751 
752 			size = sizeof(struct rxd_info) *
753 				rxd_count[nic->rxd_mode];
754 			rx_blocks->block_virt_addr = tmp_v_addr;
755 			rx_blocks->block_dma_addr = tmp_p_addr;
756 			rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
757 			if (!rx_blocks->rxds)
758 				return -ENOMEM;
759 			mem_allocated += size;
760 			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
761 				rx_blocks->rxds[l].virt_addr =
762 					rx_blocks->block_virt_addr +
763 					(rxd_size[nic->rxd_mode] * l);
764 				rx_blocks->rxds[l].dma_addr =
765 					rx_blocks->block_dma_addr +
766 					(rxd_size[nic->rxd_mode] * l);
767 			}
768 		}
769 		/* Interlinking all Rx Blocks */
770 		for (j = 0; j < blk_cnt; j++) {
771 			int next = (j + 1) % blk_cnt;
772 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
773 			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
774 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
775 			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
776 
777 			pre_rxd_blk = tmp_v_addr;
778 			pre_rxd_blk->reserved_2_pNext_RxD_block =
779 				(unsigned long)tmp_v_addr_next;
780 			pre_rxd_blk->pNext_RxD_Blk_physical =
781 				(u64)tmp_p_addr_next;
782 		}
783 	}
784 	if (nic->rxd_mode == RXD_MODE_3B) {
785 		/*
786 		 * Allocation of Storages for buffer addresses in 2BUFF mode
787 		 * and the buffers as well.
788 		 */
789 		for (i = 0; i < config->rx_ring_num; i++) {
790 			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
791 			struct ring_info *ring = &mac_control->rings[i];
792 
793 			blk_cnt = rx_cfg->num_rxd /
794 				(rxd_count[nic->rxd_mode] + 1);
795 			size = sizeof(struct buffAdd *) * blk_cnt;
796 			ring->ba = kmalloc(size, GFP_KERNEL);
797 			if (!ring->ba)
798 				return -ENOMEM;
799 			mem_allocated += size;
800 			for (j = 0; j < blk_cnt; j++) {
801 				int k = 0;
802 
803 				size = sizeof(struct buffAdd) *
804 					(rxd_count[nic->rxd_mode] + 1);
805 				ring->ba[j] = kmalloc(size, GFP_KERNEL);
806 				if (!ring->ba[j])
807 					return -ENOMEM;
808 				mem_allocated += size;
809 				while (k != rxd_count[nic->rxd_mode]) {
810 					ba = &ring->ba[j][k];
811 					size = BUF0_LEN + ALIGN_SIZE;
812 					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
813 					if (!ba->ba_0_org)
814 						return -ENOMEM;
815 					mem_allocated += size;
816 					tmp = (unsigned long)ba->ba_0_org;
817 					tmp += ALIGN_SIZE;
818 					tmp &= ~((unsigned long)ALIGN_SIZE);
819 					ba->ba_0 = (void *)tmp;
820 
821 					size = BUF1_LEN + ALIGN_SIZE;
822 					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
823 					if (!ba->ba_1_org)
824 						return -ENOMEM;
825 					mem_allocated += size;
826 					tmp = (unsigned long)ba->ba_1_org;
827 					tmp += ALIGN_SIZE;
828 					tmp &= ~((unsigned long)ALIGN_SIZE);
829 					ba->ba_1 = (void *)tmp;
830 					k++;
831 				}
832 			}
833 		}
834 	}
835 
836 	/* Allocation and initialization of Statistics block */
837 	size = sizeof(struct stat_block);
838 	mac_control->stats_mem =
839 		pci_alloc_consistent(nic->pdev, size,
840 				     &mac_control->stats_mem_phy);
841 
842 	if (!mac_control->stats_mem) {
	/*
	 * In case of failure, free_shared_mem() is called, which
	 * should free any memory that was allocated up to the
	 * point of failure.
	 */
848 		return -ENOMEM;
849 	}
850 	mem_allocated += size;
851 	mac_control->stats_mem_sz = size;
852 
853 	tmp_v_addr = mac_control->stats_mem;
854 	mac_control->stats_info = tmp_v_addr;
855 	memset(tmp_v_addr, 0, size);
856 	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
857 		dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
858 	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
859 	return SUCCESS;
860 }
861 
862 /**
863  * free_shared_mem - Free the allocated Memory
864  * @nic:  Device private variable.
865  * Description: This function is to free all memory locations allocated by
866  * the init_shared_mem() function and return it to the kernel.
867  */
868 
869 static void free_shared_mem(struct s2io_nic *nic)
870 {
871 	int i, j, blk_cnt, size;
872 	void *tmp_v_addr;
873 	dma_addr_t tmp_p_addr;
874 	int lst_size, lst_per_page;
875 	struct net_device *dev;
876 	int page_num = 0;
877 	struct config_param *config;
878 	struct mac_info *mac_control;
879 	struct stat_block *stats;
880 	struct swStat *swstats;
881 
882 	if (!nic)
883 		return;
884 
885 	dev = nic->dev;
886 
887 	config = &nic->config;
888 	mac_control = &nic->mac_control;
889 	stats = mac_control->stats_info;
890 	swstats = &stats->sw_stat;
891 
892 	lst_size = sizeof(struct TxD) * config->max_txds;
893 	lst_per_page = PAGE_SIZE / lst_size;
894 
895 	for (i = 0; i < config->tx_fifo_num; i++) {
896 		struct fifo_info *fifo = &mac_control->fifos[i];
897 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
898 
899 		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
900 		for (j = 0; j < page_num; j++) {
901 			int mem_blks = (j * lst_per_page);
902 			struct list_info_hold *fli;
903 
904 			if (!fifo->list_info)
905 				return;
906 
907 			fli = &fifo->list_info[mem_blks];
908 			if (!fli->list_virt_addr)
909 				break;
910 			pci_free_consistent(nic->pdev, PAGE_SIZE,
911 					    fli->list_virt_addr,
912 					    fli->list_phy_addr);
913 			swstats->mem_freed += PAGE_SIZE;
914 		}
915 		/* If we got a zero DMA address during allocation,
916 		 * free the page now
917 		 */
918 		if (mac_control->zerodma_virt_addr) {
919 			pci_free_consistent(nic->pdev, PAGE_SIZE,
920 					    mac_control->zerodma_virt_addr,
921 					    (dma_addr_t)0);
922 			DBG_PRINT(INIT_DBG,
923 				  "%s: Freeing TxDL with zero DMA address. "
924 				  "Virtual address %p\n",
925 				  dev->name, mac_control->zerodma_virt_addr);
926 			swstats->mem_freed += PAGE_SIZE;
927 		}
928 		kfree(fifo->list_info);
929 		swstats->mem_freed += tx_cfg->fifo_len *
930 			sizeof(struct list_info_hold);
931 	}
932 
933 	size = SIZE_OF_BLOCK;
934 	for (i = 0; i < config->rx_ring_num; i++) {
935 		struct ring_info *ring = &mac_control->rings[i];
936 
937 		blk_cnt = ring->block_count;
938 		for (j = 0; j < blk_cnt; j++) {
939 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
940 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
941 			if (tmp_v_addr == NULL)
942 				break;
943 			pci_free_consistent(nic->pdev, size,
944 					    tmp_v_addr, tmp_p_addr);
945 			swstats->mem_freed += size;
946 			kfree(ring->rx_blocks[j].rxds);
947 			swstats->mem_freed += sizeof(struct rxd_info) *
948 				rxd_count[nic->rxd_mode];
949 		}
950 	}
951 
952 	if (nic->rxd_mode == RXD_MODE_3B) {
953 		/* Freeing buffer storage addresses in 2BUFF mode. */
954 		for (i = 0; i < config->rx_ring_num; i++) {
955 			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
956 			struct ring_info *ring = &mac_control->rings[i];
957 
958 			blk_cnt = rx_cfg->num_rxd /
959 				(rxd_count[nic->rxd_mode] + 1);
960 			for (j = 0; j < blk_cnt; j++) {
961 				int k = 0;
962 				if (!ring->ba[j])
963 					continue;
964 				while (k != rxd_count[nic->rxd_mode]) {
965 					struct buffAdd *ba = &ring->ba[j][k];
966 					kfree(ba->ba_0_org);
967 					swstats->mem_freed +=
968 						BUF0_LEN + ALIGN_SIZE;
969 					kfree(ba->ba_1_org);
970 					swstats->mem_freed +=
971 						BUF1_LEN + ALIGN_SIZE;
972 					k++;
973 				}
974 				kfree(ring->ba[j]);
975 				swstats->mem_freed += sizeof(struct buffAdd) *
976 					(rxd_count[nic->rxd_mode] + 1);
977 			}
978 			kfree(ring->ba);
979 			swstats->mem_freed += sizeof(struct buffAdd *) *
980 				blk_cnt;
981 		}
982 	}
983 
984 	for (i = 0; i < nic->config.tx_fifo_num; i++) {
985 		struct fifo_info *fifo = &mac_control->fifos[i];
986 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
987 
988 		if (fifo->ufo_in_band_v) {
989 			swstats->mem_freed += tx_cfg->fifo_len *
990 				sizeof(u64);
991 			kfree(fifo->ufo_in_band_v);
992 		}
993 	}
994 
995 	if (mac_control->stats_mem) {
996 		swstats->mem_freed += mac_control->stats_mem_sz;
997 		pci_free_consistent(nic->pdev,
998 				    mac_control->stats_mem_sz,
999 				    mac_control->stats_mem,
1000 				    mac_control->stats_mem_phy);
1001 	}
1002 }
1003 
/**
 * s2io_verify_pci_mode - Verify the PCI mode the adapter operates in
 * @nic: device private variable
 * Return: the PCI mode on success, -1 if the mode is unknown.
 */
1007 
1008 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1009 {
1010 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1011 	register u64 val64 = 0;
1012 	int     mode;
1013 
1014 	val64 = readq(&bar0->pci_mode);
1015 	mode = (u8)GET_PCI_MODE(val64);
1016 
1017 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1018 		return -1;      /* Unknown PCI mode */
1019 	return mode;
1020 }
1021 
1022 #define NEC_VENID   0x1033
1023 #define NEC_DEVID   0x0125
1024 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1025 {
1026 	struct pci_dev *tdev = NULL;
1027 	for_each_pci_dev(tdev) {
1028 		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1029 			if (tdev->bus == s2io_pdev->bus->parent) {
1030 				pci_dev_put(tdev);
1031 				return 1;
1032 			}
1033 		}
1034 	}
1035 	return 0;
1036 }
1037 
1038 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
 * s2io_print_pci_mode - Identify and log the PCI/PCI-X bus mode
 * @nic: device private variable
 * Return: the PCI mode on success, -1 on an unknown or unsupported mode.
 */
1042 static int s2io_print_pci_mode(struct s2io_nic *nic)
1043 {
1044 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1045 	register u64 val64 = 0;
1046 	int	mode;
1047 	struct config_param *config = &nic->config;
1048 	const char *pcimode;
1049 
1050 	val64 = readq(&bar0->pci_mode);
1051 	mode = (u8)GET_PCI_MODE(val64);
1052 
1053 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1054 		return -1;	/* Unknown PCI mode */
1055 
1056 	config->bus_speed = bus_speed[mode];
1057 
1058 	if (s2io_on_nec_bridge(nic->pdev)) {
1059 		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1060 			  nic->dev->name);
1061 		return mode;
1062 	}
1063 
1064 	switch (mode) {
1065 	case PCI_MODE_PCI_33:
1066 		pcimode = "33MHz PCI bus";
1067 		break;
1068 	case PCI_MODE_PCI_66:
1069 		pcimode = "66MHz PCI bus";
1070 		break;
1071 	case PCI_MODE_PCIX_M1_66:
1072 		pcimode = "66MHz PCIX(M1) bus";
1073 		break;
1074 	case PCI_MODE_PCIX_M1_100:
1075 		pcimode = "100MHz PCIX(M1) bus";
1076 		break;
1077 	case PCI_MODE_PCIX_M1_133:
1078 		pcimode = "133MHz PCIX(M1) bus";
1079 		break;
1080 	case PCI_MODE_PCIX_M2_66:
1081 		pcimode = "133MHz PCIX(M2) bus";
1082 		break;
1083 	case PCI_MODE_PCIX_M2_100:
1084 		pcimode = "200MHz PCIX(M2) bus";
1085 		break;
1086 	case PCI_MODE_PCIX_M2_133:
1087 		pcimode = "266MHz PCIX(M2) bus";
1088 		break;
1089 	default:
1090 		pcimode = "unsupported bus!";
1091 		mode = -1;
1092 	}
1093 
1094 	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1095 		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1096 
1097 	return mode;
1098 }
1099 
1100 /**
 *  init_tti - Initialize the transmit traffic interrupt scheme
1102  *  @nic: device private variable
1103  *  @link: link status (UP/DOWN) used to enable/disable continuous
1104  *  transmit interrupts
1105  *  Description: The function configures transmit traffic interrupts
1106  *  Return Value:  SUCCESS on success and
1107  *  '-1' on failure
1108  */
1109 
1110 static int init_tti(struct s2io_nic *nic, int link)
1111 {
1112 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1113 	register u64 val64 = 0;
1114 	int i;
1115 	struct config_param *config = &nic->config;
1116 
1117 	for (i = 0; i < config->tx_fifo_num; i++) {
1118 		/*
1119 		 * TTI Initialization. Default Tx timer gets us about
1120 		 * 250 interrupts per sec. Continuous interrupts are enabled
1121 		 * by default.
1122 		 */
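		/*
		 * Illustrative arithmetic for the Xframe II case below:
		 * a 266 MHz bus gives count = 266 * 125 / 2 = 16625.
		 */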
1123 		if (nic->device_type == XFRAME_II_DEVICE) {
1124 			int count = (nic->config.bus_speed * 125)/2;
1125 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1126 		} else
1127 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1128 
1129 		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1130 			TTI_DATA1_MEM_TX_URNG_B(0x10) |
1131 			TTI_DATA1_MEM_TX_URNG_C(0x30) |
1132 			TTI_DATA1_MEM_TX_TIMER_AC_EN;
1133 		if (i == 0)
1134 			if (use_continuous_tx_intrs && (link == LINK_UP))
1135 				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1136 		writeq(val64, &bar0->tti_data1_mem);
1137 
1138 		if (nic->config.intr_type == MSI_X) {
1139 			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1140 				TTI_DATA2_MEM_TX_UFC_B(0x100) |
1141 				TTI_DATA2_MEM_TX_UFC_C(0x200) |
1142 				TTI_DATA2_MEM_TX_UFC_D(0x300);
1143 		} else {
1144 			if ((nic->config.tx_steering_type ==
1145 			     TX_DEFAULT_STEERING) &&
1146 			    (config->tx_fifo_num > 1) &&
1147 			    (i >= nic->udp_fifo_idx) &&
1148 			    (i < (nic->udp_fifo_idx +
1149 				  nic->total_udp_fifos)))
1150 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1151 					TTI_DATA2_MEM_TX_UFC_B(0x80) |
1152 					TTI_DATA2_MEM_TX_UFC_C(0x100) |
1153 					TTI_DATA2_MEM_TX_UFC_D(0x120);
1154 			else
1155 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1156 					TTI_DATA2_MEM_TX_UFC_B(0x20) |
1157 					TTI_DATA2_MEM_TX_UFC_C(0x40) |
1158 					TTI_DATA2_MEM_TX_UFC_D(0x80);
1159 		}
1160 
1161 		writeq(val64, &bar0->tti_data2_mem);
1162 
1163 		val64 = TTI_CMD_MEM_WE |
1164 			TTI_CMD_MEM_STROBE_NEW_CMD |
1165 			TTI_CMD_MEM_OFFSET(i);
1166 		writeq(val64, &bar0->tti_command_mem);
1167 
1168 		if (wait_for_cmd_complete(&bar0->tti_command_mem,
1169 					  TTI_CMD_MEM_STROBE_NEW_CMD,
1170 					  S2IO_BIT_RESET) != SUCCESS)
1171 			return FAILURE;
1172 	}
1173 
1174 	return SUCCESS;
1175 }
1176 
1177 /**
1178  *  init_nic - Initialization of hardware
1179  *  @nic: device private variable
1180  *  Description: The function sequentially configures every block
1181  *  of the H/W from their reset values.
1182  *  Return Value:  SUCCESS on success and
1183  *  '-1' on failure (endian settings incorrect).
1184  */
1185 
1186 static int init_nic(struct s2io_nic *nic)
1187 {
1188 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1189 	struct net_device *dev = nic->dev;
1190 	register u64 val64 = 0;
1191 	void __iomem *add;
1192 	u32 time;
1193 	int i, j;
1194 	int dtx_cnt = 0;
1195 	unsigned long long mem_share;
1196 	int mem_size;
1197 	struct config_param *config = &nic->config;
1198 	struct mac_info *mac_control = &nic->mac_control;
1199 
	/* Set the swapper control on the card */
1201 	if (s2io_set_swapper(nic)) {
1202 		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1203 		return -EIO;
1204 	}
1205 
	/*
	 * Herc requires EOI to be removed from reset before XGXS,
	 * so do that first.
	 */
1209 	if (nic->device_type & XFRAME_II_DEVICE) {
1210 		val64 = 0xA500000000ULL;
1211 		writeq(val64, &bar0->sw_reset);
1212 		msleep(500);
1213 		val64 = readq(&bar0->sw_reset);
1214 	}
1215 
1216 	/* Remove XGXS from reset state */
1217 	val64 = 0;
1218 	writeq(val64, &bar0->sw_reset);
1219 	msleep(500);
1220 	val64 = readq(&bar0->sw_reset);
1221 
	/* Ensure that it's safe to access registers by checking that the
	 * RIC_RUNNING bit is reset. The check is valid only for Xframe II.
	 */
1225 	if (nic->device_type == XFRAME_II_DEVICE) {
1226 		for (i = 0; i < 50; i++) {
1227 			val64 = readq(&bar0->adapter_status);
1228 			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1229 				break;
1230 			msleep(10);
1231 		}
1232 		if (i == 50)
1233 			return -ENODEV;
1234 	}
1235 
1236 	/*  Enable Receiving broadcasts */
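	/*
	 * mac_cfg appears to be key-protected: the RMAC_CFG_KEY is
	 * rewritten before each 32-bit half of the 64-bit value is
	 * written, hence the paired writeq/writel sequence below.
	 */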
1237 	add = &bar0->mac_cfg;
1238 	val64 = readq(&bar0->mac_cfg);
1239 	val64 |= MAC_RMAC_BCAST_ENABLE;
1240 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1241 	writel((u32)val64, add);
1242 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1243 	writel((u32) (val64 >> 32), (add + 4));
1244 
1245 	/* Read registers in all blocks */
1246 	val64 = readq(&bar0->mac_int_mask);
1247 	val64 = readq(&bar0->mc_int_mask);
1248 	val64 = readq(&bar0->xgxs_int_mask);
1249 
1250 	/*  Set MTU */
1251 	val64 = dev->mtu;
1252 	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1253 
1254 	if (nic->device_type & XFRAME_II_DEVICE) {
1255 		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1256 			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1257 					  &bar0->dtx_control, UF);
1258 			if (dtx_cnt & 0x1)
1259 				msleep(1); /* Necessary!! */
1260 			dtx_cnt++;
1261 		}
1262 	} else {
1263 		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1264 			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1265 					  &bar0->dtx_control, UF);
1266 			val64 = readq(&bar0->dtx_control);
1267 			dtx_cnt++;
1268 		}
1269 	}
1270 
1271 	/*  Tx DMA Initialization */
1272 	val64 = 0;
1273 	writeq(val64, &bar0->tx_fifo_partition_0);
1274 	writeq(val64, &bar0->tx_fifo_partition_1);
1275 	writeq(val64, &bar0->tx_fifo_partition_2);
1276 	writeq(val64, &bar0->tx_fifo_partition_3);
1277 
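	/*
	 * Each 64-bit partition register describes two FIFOs, one per
	 * 32-bit half; reading the vBIT offsets below, each half carries
	 * the FIFO priority (3 bits at offset 5) and the FIFO length
	 * minus one (13 bits at offset 19).
	 */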
1278 	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1279 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1280 
1281 		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1282 			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1283 
1284 		if (i == (config->tx_fifo_num - 1)) {
1285 			if (i % 2 == 0)
1286 				i++;
1287 		}
1288 
1289 		switch (i) {
1290 		case 1:
1291 			writeq(val64, &bar0->tx_fifo_partition_0);
1292 			val64 = 0;
1293 			j = 0;
1294 			break;
1295 		case 3:
1296 			writeq(val64, &bar0->tx_fifo_partition_1);
1297 			val64 = 0;
1298 			j = 0;
1299 			break;
1300 		case 5:
1301 			writeq(val64, &bar0->tx_fifo_partition_2);
1302 			val64 = 0;
1303 			j = 0;
1304 			break;
1305 		case 7:
1306 			writeq(val64, &bar0->tx_fifo_partition_3);
1307 			val64 = 0;
1308 			j = 0;
1309 			break;
1310 		default:
1311 			j++;
1312 			break;
1313 		}
1314 	}
1315 
1316 	/*
1317 	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1318 	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1319 	 */
1320 	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1321 		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1322 
1323 	val64 = readq(&bar0->tx_fifo_partition_0);
1324 	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1325 		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1326 
1327 	/*
1328 	 * Initialization of Tx_PA_CONFIG register to ignore packet
1329 	 * integrity checking.
1330 	 */
1331 	val64 = readq(&bar0->tx_pa_cfg);
1332 	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1333 		TX_PA_CFG_IGNORE_SNAP_OUI |
1334 		TX_PA_CFG_IGNORE_LLC_CTRL |
1335 		TX_PA_CFG_IGNORE_L2_ERR;
1336 	writeq(val64, &bar0->tx_pa_cfg);
1337 
1338 	/* Rx DMA initialization. */
1339 	val64 = 0;
1340 	for (i = 0; i < config->rx_ring_num; i++) {
1341 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1342 
1343 		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1344 	}
1345 	writeq(val64, &bar0->rx_queue_priority);
1346 
1347 	/*
1348 	 * Allocating equal share of memory to all the
1349 	 * configured Rings.
1350 	 */
1351 	val64 = 0;
1352 	if (nic->device_type & XFRAME_II_DEVICE)
1353 		mem_size = 32;
1354 	else
1355 		mem_size = 64;
1356 
1357 	for (i = 0; i < config->rx_ring_num; i++) {
1358 		switch (i) {
1359 		case 0:
1360 			mem_share = (mem_size / config->rx_ring_num +
1361 				     mem_size % config->rx_ring_num);
1362 			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1363 			continue;
1364 		case 1:
1365 			mem_share = (mem_size / config->rx_ring_num);
1366 			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1367 			continue;
1368 		case 2:
1369 			mem_share = (mem_size / config->rx_ring_num);
1370 			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1371 			continue;
1372 		case 3:
1373 			mem_share = (mem_size / config->rx_ring_num);
1374 			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1375 			continue;
1376 		case 4:
1377 			mem_share = (mem_size / config->rx_ring_num);
1378 			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1379 			continue;
1380 		case 5:
1381 			mem_share = (mem_size / config->rx_ring_num);
1382 			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1383 			continue;
1384 		case 6:
1385 			mem_share = (mem_size / config->rx_ring_num);
1386 			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1387 			continue;
1388 		case 7:
1389 			mem_share = (mem_size / config->rx_ring_num);
1390 			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1391 			continue;
1392 		}
1393 	}
1394 	writeq(val64, &bar0->rx_queue_cfg);
1395 
1396 	/*
1397 	 * Filling Tx round robin registers
1398 	 * as per the number of FIFOs for equal scheduling priority
1399 	 */
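	/*
	 * Each byte in these registers names the FIFO served in one Tx
	 * slot; e.g. for two FIFOs the pattern 0x0001000100010001ULL
	 * alternates FIFO 0 and FIFO 1 every slot (a sketch of the
	 * encoding as suggested by the tables below).
	 */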
1400 	switch (config->tx_fifo_num) {
1401 	case 1:
1402 		val64 = 0x0;
1403 		writeq(val64, &bar0->tx_w_round_robin_0);
1404 		writeq(val64, &bar0->tx_w_round_robin_1);
1405 		writeq(val64, &bar0->tx_w_round_robin_2);
1406 		writeq(val64, &bar0->tx_w_round_robin_3);
1407 		writeq(val64, &bar0->tx_w_round_robin_4);
1408 		break;
1409 	case 2:
1410 		val64 = 0x0001000100010001ULL;
1411 		writeq(val64, &bar0->tx_w_round_robin_0);
1412 		writeq(val64, &bar0->tx_w_round_robin_1);
1413 		writeq(val64, &bar0->tx_w_round_robin_2);
1414 		writeq(val64, &bar0->tx_w_round_robin_3);
1415 		val64 = 0x0001000100000000ULL;
1416 		writeq(val64, &bar0->tx_w_round_robin_4);
1417 		break;
1418 	case 3:
1419 		val64 = 0x0001020001020001ULL;
1420 		writeq(val64, &bar0->tx_w_round_robin_0);
1421 		val64 = 0x0200010200010200ULL;
1422 		writeq(val64, &bar0->tx_w_round_robin_1);
1423 		val64 = 0x0102000102000102ULL;
1424 		writeq(val64, &bar0->tx_w_round_robin_2);
1425 		val64 = 0x0001020001020001ULL;
1426 		writeq(val64, &bar0->tx_w_round_robin_3);
1427 		val64 = 0x0200010200000000ULL;
1428 		writeq(val64, &bar0->tx_w_round_robin_4);
1429 		break;
1430 	case 4:
1431 		val64 = 0x0001020300010203ULL;
1432 		writeq(val64, &bar0->tx_w_round_robin_0);
1433 		writeq(val64, &bar0->tx_w_round_robin_1);
1434 		writeq(val64, &bar0->tx_w_round_robin_2);
1435 		writeq(val64, &bar0->tx_w_round_robin_3);
1436 		val64 = 0x0001020300000000ULL;
1437 		writeq(val64, &bar0->tx_w_round_robin_4);
1438 		break;
1439 	case 5:
1440 		val64 = 0x0001020304000102ULL;
1441 		writeq(val64, &bar0->tx_w_round_robin_0);
1442 		val64 = 0x0304000102030400ULL;
1443 		writeq(val64, &bar0->tx_w_round_robin_1);
1444 		val64 = 0x0102030400010203ULL;
1445 		writeq(val64, &bar0->tx_w_round_robin_2);
1446 		val64 = 0x0400010203040001ULL;
1447 		writeq(val64, &bar0->tx_w_round_robin_3);
1448 		val64 = 0x0203040000000000ULL;
1449 		writeq(val64, &bar0->tx_w_round_robin_4);
1450 		break;
1451 	case 6:
1452 		val64 = 0x0001020304050001ULL;
1453 		writeq(val64, &bar0->tx_w_round_robin_0);
1454 		val64 = 0x0203040500010203ULL;
1455 		writeq(val64, &bar0->tx_w_round_robin_1);
1456 		val64 = 0x0405000102030405ULL;
1457 		writeq(val64, &bar0->tx_w_round_robin_2);
1458 		val64 = 0x0001020304050001ULL;
1459 		writeq(val64, &bar0->tx_w_round_robin_3);
1460 		val64 = 0x0203040500000000ULL;
1461 		writeq(val64, &bar0->tx_w_round_robin_4);
1462 		break;
1463 	case 7:
1464 		val64 = 0x0001020304050600ULL;
1465 		writeq(val64, &bar0->tx_w_round_robin_0);
1466 		val64 = 0x0102030405060001ULL;
1467 		writeq(val64, &bar0->tx_w_round_robin_1);
1468 		val64 = 0x0203040506000102ULL;
1469 		writeq(val64, &bar0->tx_w_round_robin_2);
1470 		val64 = 0x0304050600010203ULL;
1471 		writeq(val64, &bar0->tx_w_round_robin_3);
1472 		val64 = 0x0405060000000000ULL;
1473 		writeq(val64, &bar0->tx_w_round_robin_4);
1474 		break;
1475 	case 8:
1476 		val64 = 0x0001020304050607ULL;
1477 		writeq(val64, &bar0->tx_w_round_robin_0);
1478 		writeq(val64, &bar0->tx_w_round_robin_1);
1479 		writeq(val64, &bar0->tx_w_round_robin_2);
1480 		writeq(val64, &bar0->tx_w_round_robin_3);
1481 		val64 = 0x0001020300000000ULL;
1482 		writeq(val64, &bar0->tx_w_round_robin_4);
1483 		break;
1484 	}
1485 
1486 	/* Enable all configured Tx FIFO partitions */
1487 	val64 = readq(&bar0->tx_fifo_partition_0);
1488 	val64 |= (TX_FIFO_PARTITION_EN);
1489 	writeq(val64, &bar0->tx_fifo_partition_0);
1490 
1491 	/* Filling the Rx round robin registers as per the
1492 	 * number of Rings and steering based on QoS with
1493 	 * equal priority.
1494 	 */
1495 	switch (config->rx_ring_num) {
1496 	case 1:
1497 		val64 = 0x0;
1498 		writeq(val64, &bar0->rx_w_round_robin_0);
1499 		writeq(val64, &bar0->rx_w_round_robin_1);
1500 		writeq(val64, &bar0->rx_w_round_robin_2);
1501 		writeq(val64, &bar0->rx_w_round_robin_3);
1502 		writeq(val64, &bar0->rx_w_round_robin_4);
1503 
1504 		val64 = 0x8080808080808080ULL;
1505 		writeq(val64, &bar0->rts_qos_steering);
1506 		break;
1507 	case 2:
1508 		val64 = 0x0001000100010001ULL;
1509 		writeq(val64, &bar0->rx_w_round_robin_0);
1510 		writeq(val64, &bar0->rx_w_round_robin_1);
1511 		writeq(val64, &bar0->rx_w_round_robin_2);
1512 		writeq(val64, &bar0->rx_w_round_robin_3);
1513 		val64 = 0x0001000100000000ULL;
1514 		writeq(val64, &bar0->rx_w_round_robin_4);
1515 
1516 		val64 = 0x8080808040404040ULL;
1517 		writeq(val64, &bar0->rts_qos_steering);
1518 		break;
1519 	case 3:
1520 		val64 = 0x0001020001020001ULL;
1521 		writeq(val64, &bar0->rx_w_round_robin_0);
1522 		val64 = 0x0200010200010200ULL;
1523 		writeq(val64, &bar0->rx_w_round_robin_1);
1524 		val64 = 0x0102000102000102ULL;
1525 		writeq(val64, &bar0->rx_w_round_robin_2);
1526 		val64 = 0x0001020001020001ULL;
1527 		writeq(val64, &bar0->rx_w_round_robin_3);
1528 		val64 = 0x0200010200000000ULL;
1529 		writeq(val64, &bar0->rx_w_round_robin_4);
1530 
1531 		val64 = 0x8080804040402020ULL;
1532 		writeq(val64, &bar0->rts_qos_steering);
1533 		break;
1534 	case 4:
1535 		val64 = 0x0001020300010203ULL;
1536 		writeq(val64, &bar0->rx_w_round_robin_0);
1537 		writeq(val64, &bar0->rx_w_round_robin_1);
1538 		writeq(val64, &bar0->rx_w_round_robin_2);
1539 		writeq(val64, &bar0->rx_w_round_robin_3);
1540 		val64 = 0x0001020300000000ULL;
1541 		writeq(val64, &bar0->rx_w_round_robin_4);
1542 
1543 		val64 = 0x8080404020201010ULL;
1544 		writeq(val64, &bar0->rts_qos_steering);
1545 		break;
1546 	case 5:
1547 		val64 = 0x0001020304000102ULL;
1548 		writeq(val64, &bar0->rx_w_round_robin_0);
1549 		val64 = 0x0304000102030400ULL;
1550 		writeq(val64, &bar0->rx_w_round_robin_1);
1551 		val64 = 0x0102030400010203ULL;
1552 		writeq(val64, &bar0->rx_w_round_robin_2);
1553 		val64 = 0x0400010203040001ULL;
1554 		writeq(val64, &bar0->rx_w_round_robin_3);
1555 		val64 = 0x0203040000000000ULL;
1556 		writeq(val64, &bar0->rx_w_round_robin_4);
1557 
1558 		val64 = 0x8080404020201008ULL;
1559 		writeq(val64, &bar0->rts_qos_steering);
1560 		break;
1561 	case 6:
1562 		val64 = 0x0001020304050001ULL;
1563 		writeq(val64, &bar0->rx_w_round_robin_0);
1564 		val64 = 0x0203040500010203ULL;
1565 		writeq(val64, &bar0->rx_w_round_robin_1);
1566 		val64 = 0x0405000102030405ULL;
1567 		writeq(val64, &bar0->rx_w_round_robin_2);
1568 		val64 = 0x0001020304050001ULL;
1569 		writeq(val64, &bar0->rx_w_round_robin_3);
1570 		val64 = 0x0203040500000000ULL;
1571 		writeq(val64, &bar0->rx_w_round_robin_4);
1572 
1573 		val64 = 0x8080404020100804ULL;
1574 		writeq(val64, &bar0->rts_qos_steering);
1575 		break;
1576 	case 7:
1577 		val64 = 0x0001020304050600ULL;
1578 		writeq(val64, &bar0->rx_w_round_robin_0);
1579 		val64 = 0x0102030405060001ULL;
1580 		writeq(val64, &bar0->rx_w_round_robin_1);
1581 		val64 = 0x0203040506000102ULL;
1582 		writeq(val64, &bar0->rx_w_round_robin_2);
1583 		val64 = 0x0304050600010203ULL;
1584 		writeq(val64, &bar0->rx_w_round_robin_3);
1585 		val64 = 0x0405060000000000ULL;
1586 		writeq(val64, &bar0->rx_w_round_robin_4);
1587 
1588 		val64 = 0x8080402010080402ULL;
1589 		writeq(val64, &bar0->rts_qos_steering);
1590 		break;
1591 	case 8:
1592 		val64 = 0x0001020304050607ULL;
1593 		writeq(val64, &bar0->rx_w_round_robin_0);
1594 		writeq(val64, &bar0->rx_w_round_robin_1);
1595 		writeq(val64, &bar0->rx_w_round_robin_2);
1596 		writeq(val64, &bar0->rx_w_round_robin_3);
1597 		val64 = 0x0001020300000000ULL;
1598 		writeq(val64, &bar0->rx_w_round_robin_4);
1599 
1600 		val64 = 0x8040201008040201ULL;
1601 		writeq(val64, &bar0->rts_qos_steering);
1602 		break;
1603 	}
1604 
1605 	/* UDP Fix */
1606 	val64 = 0;
1607 	for (i = 0; i < 8; i++)
1608 		writeq(val64, &bar0->rts_frm_len_n[i]);
1609 
1610 	/* Set the default rts frame length for the rings configured */
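	/* The extra 22 bytes presumably cover the 14-byte Ethernet header,
	 * a 4-byte VLAN tag and the 4-byte FCS.
	 */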
1611 	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1612 	for (i = 0 ; i < config->rx_ring_num ; i++)
1613 		writeq(val64, &bar0->rts_frm_len_n[i]);
1614 
1615 	/* Set the frame length for the configured rings
1616 	 * desired by the user
1617 	 */
1618 	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that the user
		 * has not specified frame length steering.
		 * If the user provides a frame length then program
		 * the rts_frm_len register for those values; otherwise
		 * leave it as it is.
		 */
1625 		if (rts_frm_len[i] != 0) {
1626 			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1627 			       &bar0->rts_frm_len_n[i]);
1628 		}
1629 	}
1630 
1631 	/* Disable differentiated services steering logic */
1632 	for (i = 0; i < 64; i++) {
1633 		if (rts_ds_steer(nic, i, 0) == FAILURE) {
1634 			DBG_PRINT(ERR_DBG,
1635 				  "%s: rts_ds_steer failed on codepoint %d\n",
1636 				  dev->name, i);
1637 			return -ENODEV;
1638 		}
1639 	}
1640 
1641 	/* Program statistics memory */
1642 	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1643 
1644 	if (nic->device_type == XFRAME_II_DEVICE) {
1645 		val64 = STAT_BC(0x320);
1646 		writeq(val64, &bar0->stat_byte_cnt);
1647 	}
1648 
1649 	/*
1650 	 * Initializing the sampling rate for the device to calculate the
1651 	 * bandwidth utilization.
1652 	 */
1653 	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1654 		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1655 	writeq(val64, &bar0->mac_link_util);
1656 
1657 	/*
1658 	 * Initializing the Transmit and Receive Traffic Interrupt
1659 	 * Scheme.
1660 	 */
1661 
1662 	/* Initialize TTI */
1663 	if (SUCCESS != init_tti(nic, nic->last_link_state))
1664 		return -ENODEV;
1665 
1666 	/* RTI Initialization */
1667 	if (nic->device_type == XFRAME_II_DEVICE) {
		/*
		 * Programmed to generate approximately 500 interrupts
		 * per second.
		 */
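		/*
		 * Illustrative arithmetic: a 266 MHz bus gives
		 * count = 266 * 125 / 4 = 8312 (integer division).
		 */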
1672 		int count = (nic->config.bus_speed * 125)/4;
1673 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1674 	} else
1675 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1676 	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1677 		RTI_DATA1_MEM_RX_URNG_B(0x10) |
1678 		RTI_DATA1_MEM_RX_URNG_C(0x30) |
1679 		RTI_DATA1_MEM_RX_TIMER_AC_EN;
1680 
1681 	writeq(val64, &bar0->rti_data1_mem);
1682 
1683 	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1684 		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1685 	if (nic->config.intr_type == MSI_X)
1686 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1687 			  RTI_DATA2_MEM_RX_UFC_D(0x40));
1688 	else
1689 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1690 			  RTI_DATA2_MEM_RX_UFC_D(0x80));
1691 	writeq(val64, &bar0->rti_data2_mem);
1692 
1693 	for (i = 0; i < config->rx_ring_num; i++) {
1694 		val64 = RTI_CMD_MEM_WE |
1695 			RTI_CMD_MEM_STROBE_NEW_CMD |
1696 			RTI_CMD_MEM_OFFSET(i);
1697 		writeq(val64, &bar0->rti_command_mem);
1698 
		/*
		 * Once the operation completes, the Strobe bit of the
		 * command register will be reset. We poll for this
		 * particular condition. We wait for a maximum of 500ms
		 * for the operation to complete; if it's not complete
		 * by then we return an error.
		 */
1706 		time = 0;
1707 		while (true) {
1708 			val64 = readq(&bar0->rti_command_mem);
1709 			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1710 				break;
1711 
1712 			if (time > 10) {
1713 				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1714 					  dev->name);
1715 				return -ENODEV;
1716 			}
1717 			time++;
1718 			msleep(50);
1719 		}
1720 	}
1721 
	/*
	 * Initialize proper pause threshold values for all
	 * 8 queues on the Rx side.
	 */
1726 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1727 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1728 
1729 	/* Disable RMAC PAD STRIPPING */
1730 	add = &bar0->mac_cfg;
1731 	val64 = readq(&bar0->mac_cfg);
1732 	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
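	/*
	 * mac_cfg is key-protected: the 0x4C0D key must be written to
	 * rmac_cfg_key immediately before each 32-bit half of mac_cfg is
	 * written (the same sequence appears in the FCS setup below).
	 */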
1733 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1734 	writel((u32) (val64), add);
1735 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1736 	writel((u32) (val64 >> 32), (add + 4));
1737 	val64 = readq(&bar0->mac_cfg);
1738 
1739 	/* Enable FCS stripping by adapter */
1740 	add = &bar0->mac_cfg;
1741 	val64 = readq(&bar0->mac_cfg);
1742 	val64 |= MAC_CFG_RMAC_STRIP_FCS;
1743 	if (nic->device_type == XFRAME_II_DEVICE)
1744 		writeq(val64, &bar0->mac_cfg);
1745 	else {
1746 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1747 		writel((u32) (val64), add);
1748 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1749 		writel((u32) (val64 >> 32), (add + 4));
1750 	}
1751 
1752 	/*
1753 	 * Set the time value to be inserted in the pause frame
1754 	 * generated by xena.
1755 	 */
1756 	val64 = readq(&bar0->rmac_pause_cfg);
1757 	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1758 	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1759 	writeq(val64, &bar0->rmac_pause_cfg);
1760 
	/*
	 * Set the threshold limit for generating pause frames.
	 * If the amount of data in any queue exceeds the fraction
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256 of the
	 * queue size, a pause frame is generated.
	 */
1767 	val64 = 0;
1768 	for (i = 0; i < 4; i++) {
1769 		val64 |= (((u64)0xFF00 |
1770 			   nic->mac_control.mc_pause_threshold_q0q3)
1771 			  << (i * 2 * 8));
1772 	}
1773 	writeq(val64, &bar0->mc_pause_thresh_q0q3);
1774 
1775 	val64 = 0;
1776 	for (i = 0; i < 4; i++) {
1777 		val64 |= (((u64)0xFF00 |
1778 			   nic->mac_control.mc_pause_threshold_q4q7)
1779 			  << (i * 2 * 8));
1780 	}
1781 	writeq(val64, &bar0->mc_pause_thresh_q4q7);
1782 
	/*
	 * TxDMA will stop issuing read requests if the number of read
	 * splits exceeds the limit set by shared_splits.
	 */
1787 	val64 = readq(&bar0->pic_control);
1788 	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1789 	writeq(val64, &bar0->pic_control);
1790 
1791 	if (nic->config.bus_speed == 266) {
1792 		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1793 		writeq(0x0, &bar0->read_retry_delay);
1794 		writeq(0x0, &bar0->write_retry_delay);
1795 	}
1796 
1797 	/*
1798 	 * Programming the Herc to split every write transaction
1799 	 * that does not start on an ADB to reduce disconnects.
1800 	 */
1801 	if (nic->device_type == XFRAME_II_DEVICE) {
1802 		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1803 			MISC_LINK_STABILITY_PRD(3);
1804 		writeq(val64, &bar0->misc_control);
1805 		val64 = readq(&bar0->pic_control2);
1806 		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1807 		writeq(val64, &bar0->pic_control2);
1808 	}
1809 	if (strstr(nic->product_name, "CX4")) {
1810 		val64 = TMAC_AVG_IPG(0x17);
1811 		writeq(val64, &bar0->tmac_avg_ipg);
1812 	}
1813 
1814 	return SUCCESS;
1815 }
1816 #define LINK_UP_DOWN_INTERRUPT		1
1817 #define MAC_RMAC_ERR_TIMER		2
1818 
1819 static int s2io_link_fault_indication(struct s2io_nic *nic)
1820 {
1821 	if (nic->device_type == XFRAME_II_DEVICE)
1822 		return LINK_UP_DOWN_INTERRUPT;
1823 	else
1824 		return MAC_RMAC_ERR_TIMER;
1825 }
1826 
/**
 *  do_s2io_write_bits - update alarm bits in an alarm mask register
 *  @value: alarm bits to update
 *  @flag: ENABLE_INTRS to clear the bits (unmask), else set them (mask)
 *  @addr: address of the alarm mask register
 *  Description: update alarm bits in an alarm mask register
 *  Return Value:
 *  NONE.
 */
1836 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1837 {
1838 	u64 temp64;
1839 
1840 	temp64 = readq(addr);
1841 
1842 	if (flag == ENABLE_INTRS)
1843 		temp64 &= ~((u64)value);
1844 	else
1845 		temp64 |= ((u64)value);
1846 	writeq(temp64, addr);
1847 }
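
/*
 * Usage sketch (illustrative only, not called by the driver): the alarm
 * mask registers use inverted polarity, so enabling an interrupt means
 * clearing its mask bit, e.g.
 *
 *	do_s2io_write_bits(PFC_SM_ERR_ALARM, ENABLE_INTRS,
 *			   &bar0->pfc_err_mask);		// unmask
 *	do_s2io_write_bits(PFC_SM_ERR_ALARM, DISABLE_INTRS,
 *			   &bar0->pfc_err_mask);		// mask again
 */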
1848 
1849 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1850 {
1851 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1852 	register u64 gen_int_mask = 0;
1853 	u64 interruptible;
1854 
1855 	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1856 	if (mask & TX_DMA_INTR) {
1857 		gen_int_mask |= TXDMA_INT_M;
1858 
1859 		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1860 				   TXDMA_PCC_INT | TXDMA_TTI_INT |
1861 				   TXDMA_LSO_INT | TXDMA_TPA_INT |
1862 				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1863 
1864 		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1865 				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1866 				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1867 				   &bar0->pfc_err_mask);
1868 
1869 		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1870 				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1871 				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1872 
1873 		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1874 				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1875 				   PCC_N_SERR | PCC_6_COF_OV_ERR |
1876 				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1877 				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1878 				   PCC_TXB_ECC_SG_ERR,
1879 				   flag, &bar0->pcc_err_mask);
1880 
1881 		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1882 				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1883 
1884 		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1885 				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1886 				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1887 				   flag, &bar0->lso_err_mask);
1888 
1889 		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1890 				   flag, &bar0->tpa_err_mask);
1891 
1892 		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1893 	}
1894 
1895 	if (mask & TX_MAC_INTR) {
1896 		gen_int_mask |= TXMAC_INT_M;
1897 		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1898 				   &bar0->mac_int_mask);
1899 		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1900 				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1901 				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1902 				   flag, &bar0->mac_tmac_err_mask);
1903 	}
1904 
1905 	if (mask & TX_XGXS_INTR) {
1906 		gen_int_mask |= TXXGXS_INT_M;
1907 		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1908 				   &bar0->xgxs_int_mask);
1909 		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1910 				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1911 				   flag, &bar0->xgxs_txgxs_err_mask);
1912 	}
1913 
1914 	if (mask & RX_DMA_INTR) {
1915 		gen_int_mask |= RXDMA_INT_M;
1916 		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1917 				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1918 				   flag, &bar0->rxdma_int_mask);
1919 		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1920 				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1921 				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1922 				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1923 		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1924 				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1925 				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1926 				   &bar0->prc_pcix_err_mask);
1927 		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1928 				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1929 				   &bar0->rpa_err_mask);
1930 		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1931 				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1932 				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1933 				   RDA_FRM_ECC_SG_ERR |
1934 				   RDA_MISC_ERR|RDA_PCIX_ERR,
1935 				   flag, &bar0->rda_err_mask);
1936 		do_s2io_write_bits(RTI_SM_ERR_ALARM |
1937 				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1938 				   flag, &bar0->rti_err_mask);
1939 	}
1940 
1941 	if (mask & RX_MAC_INTR) {
1942 		gen_int_mask |= RXMAC_INT_M;
1943 		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1944 				   &bar0->mac_int_mask);
1945 		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1946 				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1947 				 RMAC_DOUBLE_ECC_ERR);
1948 		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
1949 			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
1950 		do_s2io_write_bits(interruptible,
1951 				   flag, &bar0->mac_rmac_err_mask);
1952 	}
1953 
1954 	if (mask & RX_XGXS_INTR) {
1955 		gen_int_mask |= RXXGXS_INT_M;
1956 		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1957 				   &bar0->xgxs_int_mask);
1958 		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1959 				   &bar0->xgxs_rxgxs_err_mask);
1960 	}
1961 
1962 	if (mask & MC_INTR) {
1963 		gen_int_mask |= MC_INT_M;
1964 		do_s2io_write_bits(MC_INT_MASK_MC_INT,
1965 				   flag, &bar0->mc_int_mask);
1966 		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1967 				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1968 				   &bar0->mc_err_mask);
1969 	}
1970 	nic->general_int_mask = gen_int_mask;
1971 
1972 	/* Remove this line when alarm interrupts are enabled */
1973 	nic->general_int_mask = 0;
1974 }
1975 
/**
 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
 *  @nic: device private variable
 *  @mask: A mask indicating which Intr blocks must be modified
 *  @flag: A flag indicating whether to enable or disable the Intrs
 *  Description: This function will either disable or enable the interrupts
 *  depending on the flag argument. The mask argument can be used to
 *  enable/disable any Intr block.
 *  Return Value: NONE.
 */
1986 
1987 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1988 {
1989 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1990 	register u64 temp64 = 0, intr_mask = 0;
1991 
1992 	intr_mask = nic->general_int_mask;
1993 
1994 	/*  Top level interrupt classification */
1995 	/*  PIC Interrupts */
1996 	if (mask & TX_PIC_INTR) {
1997 		/*  Enable PIC Intrs in the general intr mask register */
1998 		intr_mask |= TXPIC_INT_M;
1999 		if (flag == ENABLE_INTRS) {
			/*
			 * If this is a Hercules adapter, enable the GPIO
			 * (link up/down) interrupt; otherwise disable all
			 * PCI-X, Flash, MDIO, IIC and GPIO interrupts
			 * for now.
			 * TODO
			 */
2006 			if (s2io_link_fault_indication(nic) ==
2007 			    LINK_UP_DOWN_INTERRUPT) {
2008 				do_s2io_write_bits(PIC_INT_GPIO, flag,
2009 						   &bar0->pic_int_mask);
2010 				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2011 						   &bar0->gpio_int_mask);
2012 			} else
2013 				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2014 		} else if (flag == DISABLE_INTRS) {
2015 			/*
2016 			 * Disable PIC Intrs in the general
2017 			 * intr mask register
2018 			 */
2019 			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2020 		}
2021 	}
2022 
2023 	/*  Tx traffic interrupts */
2024 	if (mask & TX_TRAFFIC_INTR) {
2025 		intr_mask |= TXTRAFFIC_INT_M;
2026 		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts;
			 * writing 0 enables all 64 Tx interrupt levels.
			 */
2031 			writeq(0x0, &bar0->tx_traffic_mask);
2032 		} else if (flag == DISABLE_INTRS) {
2033 			/*
2034 			 * Disable Tx Traffic Intrs in the general intr mask
2035 			 * register.
2036 			 */
2037 			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2038 		}
2039 	}
2040 
2041 	/*  Rx traffic interrupts */
2042 	if (mask & RX_TRAFFIC_INTR) {
2043 		intr_mask |= RXTRAFFIC_INT_M;
2044 		if (flag == ENABLE_INTRS) {
2045 			/* writing 0 Enables all 8 RX interrupt levels */
2046 			writeq(0x0, &bar0->rx_traffic_mask);
2047 		} else if (flag == DISABLE_INTRS) {
2048 			/*
2049 			 * Disable Rx Traffic Intrs in the general intr mask
2050 			 * register.
2051 			 */
2052 			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2053 		}
2054 	}
2055 
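	/*
	 * The general interrupt mask register uses inverted polarity:
	 * ENABLE_INTRS clears the accumulated mask bits to unmask those
	 * sources, while any other flag masks everything.
	 */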
2056 	temp64 = readq(&bar0->general_int_mask);
2057 	if (flag == ENABLE_INTRS)
2058 		temp64 &= ~((u64)intr_mask);
2059 	else
2060 		temp64 = DISABLE_ALL_INTRS;
2061 	writeq(temp64, &bar0->general_int_mask);
2062 
2063 	nic->general_int_mask = readq(&bar0->general_int_mask);
2064 }
2065 
/**
 *  verify_pcc_quiescent - Checks for PCC quiescent state
 *  @sp: device private variable
 *  @flag: expected state of the PCC idle bit(s)
 *  Return: 1 if PCC is quiescent
 *          0 if PCC is not quiescent
 */
2071 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2072 {
2073 	int ret = 0, herc;
2074 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2075 	u64 val64 = readq(&bar0->adapter_status);
2076 
2077 	herc = (sp->device_type == XFRAME_II_DEVICE);
2078 
2079 	if (flag == false) {
2080 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2081 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2082 				ret = 1;
2083 		} else {
2084 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2085 				ret = 1;
2086 		}
2087 	} else {
2088 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2089 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2090 			     ADAPTER_STATUS_RMAC_PCC_IDLE))
2091 				ret = 1;
2092 		} else {
2093 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2094 			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2095 				ret = 1;
2096 		}
2097 	}
2098 
2099 	return ret;
2100 }
/**
 *  verify_xena_quiescence - Checks whether the H/W is ready
 *  @sp: device private variable
 *  Description: Returns whether the H/W is ready to go or not. Depending
 *  on whether the adapter enable bit has been written or not, the
 *  comparison differs (see verify_pcc_quiescent() above).
 *  Return: 1 if Xena is quiescent
 *          0 if Xena is not quiescent
 */
2110 
2111 static int verify_xena_quiescence(struct s2io_nic *sp)
2112 {
2113 	int  mode;
2114 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2115 	u64 val64 = readq(&bar0->adapter_status);
2116 	mode = s2io_verify_pci_mode(sp);
2117 
2118 	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2119 		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2120 		return 0;
2121 	}
2122 	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2123 		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2124 		return 0;
2125 	}
2126 	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2127 		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2128 		return 0;
2129 	}
2130 	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2131 		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2132 		return 0;
2133 	}
2134 	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2135 		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2136 		return 0;
2137 	}
2138 	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2139 		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2140 		return 0;
2141 	}
2142 	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2143 		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2144 		return 0;
2145 	}
2146 	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2147 		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2148 		return 0;
2149 	}
2150 
	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
2156 	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2157 	    sp->device_type == XFRAME_II_DEVICE &&
2158 	    mode != PCI_MODE_PCI_33) {
2159 		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2160 		return 0;
2161 	}
2162 	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2163 	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2164 		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2165 		return 0;
2166 	}
2167 	return 1;
2168 }
2169 
/**
 * fix_mac_address - Fix for MAC address problem on Alpha platforms
 * @sp: Pointer to device specific structure
 * Description:
 * New procedure to clear MAC address reading problems on Alpha platforms
 */
2177 
2178 static void fix_mac_address(struct s2io_nic *sp)
2179 {
2180 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2181 	int i = 0;
2182 
2183 	while (fix_mac[i] != END_SIGN) {
2184 		writeq(fix_mac[i++], &bar0->gpio_control);
2185 		udelay(10);
2186 		(void) readq(&bar0->gpio_control);
2187 	}
2188 }
2189 
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this function is
 *  called, all registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and -1 on failure.
 */
2202 
2203 static int start_nic(struct s2io_nic *nic)
2204 {
2205 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2206 	struct net_device *dev = nic->dev;
2207 	register u64 val64 = 0;
2208 	u16 subid, i;
2209 	struct config_param *config = &nic->config;
2210 	struct mac_info *mac_control = &nic->mac_control;
2211 
2212 	/*  PRC Initialization and configuration */
2213 	for (i = 0; i < config->rx_ring_num; i++) {
2214 		struct ring_info *ring = &mac_control->rings[i];
2215 
2216 		writeq((u64)ring->rx_blocks[0].block_dma_addr,
2217 		       &bar0->prc_rxd0_n[i]);
2218 
2219 		val64 = readq(&bar0->prc_ctrl_n[i]);
2220 		if (nic->rxd_mode == RXD_MODE_1)
2221 			val64 |= PRC_CTRL_RC_ENABLED;
2222 		else
2223 			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2224 		if (nic->device_type == XFRAME_II_DEVICE)
2225 			val64 |= PRC_CTRL_GROUP_READS;
2226 		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2227 		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2228 		writeq(val64, &bar0->prc_ctrl_n[i]);
2229 	}
2230 
2231 	if (nic->rxd_mode == RXD_MODE_3B) {
2232 		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2233 		val64 = readq(&bar0->rx_pa_cfg);
2234 		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2235 		writeq(val64, &bar0->rx_pa_cfg);
2236 	}
2237 
2238 	if (vlan_tag_strip == 0) {
2239 		val64 = readq(&bar0->rx_pa_cfg);
2240 		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2241 		writeq(val64, &bar0->rx_pa_cfg);
2242 		nic->vlan_strip_flag = 0;
2243 	}
2244 
	/*
	 * Enabling MC-RLDRAM. After enabling the device, we wait
	 * around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
2250 	val64 = readq(&bar0->mc_rldram_mrs);
2251 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2252 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2253 	val64 = readq(&bar0->mc_rldram_mrs);
2254 
2255 	msleep(100);	/* Delay by around 100 ms. */
2256 
2257 	/* Enabling ECC Protection. */
2258 	val64 = readq(&bar0->adapter_control);
2259 	val64 &= ~ADAPTER_ECC_EN;
2260 	writeq(val64, &bar0->adapter_control);
2261 
2262 	/*
2263 	 * Verify if the device is ready to be enabled, if so enable
2264 	 * it.
2265 	 */
2266 	val64 = readq(&bar0->adapter_status);
2267 	if (!verify_xena_quiescence(nic)) {
2268 		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2269 			  "Adapter status reads: 0x%llx\n",
2270 			  dev->name, (unsigned long long)val64);
2271 		return FAILURE;
2272 	}
2273 
2274 	/*
2275 	 * With some switches, link might be already up at this point.
2276 	 * Because of this weird behavior, when we enable laser,
2277 	 * we may not get link. We need to handle this. We cannot
2278 	 * figure out which switch is misbehaving. So we are forced to
2279 	 * make a global change.
2280 	 */
2281 
2282 	/* Enabling Laser. */
2283 	val64 = readq(&bar0->adapter_control);
2284 	val64 |= ADAPTER_EOI_TX_ON;
2285 	writeq(val64, &bar0->adapter_control);
2286 
2287 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * We don't see link state interrupts initially on some
		 * switches, so directly schedule the link state task here.
		 */
2292 		schedule_work(&nic->set_link_task);
2293 	}
2294 	/* SXE-002: Initialize link and activity LED */
2295 	subid = nic->pdev->subsystem_device;
2296 	if (((subid & 0xFF) >= 0x07) &&
2297 	    (nic->device_type == XFRAME_I_DEVICE)) {
2298 		val64 = readq(&bar0->gpio_control);
2299 		val64 |= 0x0000800000000000ULL;
2300 		writeq(val64, &bar0->gpio_control);
2301 		val64 = 0x0411040400000000ULL;
2302 		writeq(val64, (void __iomem *)bar0 + 0x2700);
2303 	}
2304 
2305 	return SUCCESS;
2306 }
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo data structure to which the TxDL belongs
 * @txdlp: pointer to the first TxD in the TxD list
 * @get_off: current get offset within the fifo
 */
2310 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2311 					struct TxD *txdlp, int get_off)
2312 {
2313 	struct s2io_nic *nic = fifo_data->nic;
2314 	struct sk_buff *skb;
2315 	struct TxD *txds;
2316 	u16 j, frg_cnt;
2317 
2318 	txds = txdlp;
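	/*
	 * The first TxD may point at the in-band UFO header buffer rather
	 * than skb data; if so, unmap it and step past it.
	 */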
2319 	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2320 		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2321 				 sizeof(u64), PCI_DMA_TODEVICE);
2322 		txds++;
2323 	}
2324 
2325 	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2326 	if (!skb) {
2327 		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2328 		return NULL;
2329 	}
2330 	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2331 			 skb_headlen(skb), PCI_DMA_TODEVICE);
2332 	frg_cnt = skb_shinfo(skb)->nr_frags;
2333 	if (frg_cnt) {
2334 		txds++;
2335 		for (j = 0; j < frg_cnt; j++, txds++) {
2336 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2337 			if (!txds->Buffer_Pointer)
2338 				break;
2339 			pci_unmap_page(nic->pdev,
2340 				       (dma_addr_t)txds->Buffer_Pointer,
2341 				       skb_frag_size(frag), PCI_DMA_TODEVICE);
2342 		}
2343 	}
2344 	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2345 	return skb;
2346 }
2347 
2348 /**
2349  *  free_tx_buffers - Free all queued Tx buffers
2350  *  @nic : device private variable.
2351  *  Description:
2352  *  Free all queued Tx buffers.
2353  *  Return Value: void
2354  */
2355 
2356 static void free_tx_buffers(struct s2io_nic *nic)
2357 {
2358 	struct net_device *dev = nic->dev;
2359 	struct sk_buff *skb;
2360 	struct TxD *txdp;
2361 	int i, j;
2362 	int cnt = 0;
2363 	struct config_param *config = &nic->config;
2364 	struct mac_info *mac_control = &nic->mac_control;
2365 	struct stat_block *stats = mac_control->stats_info;
2366 	struct swStat *swstats = &stats->sw_stat;
2367 
2368 	for (i = 0; i < config->tx_fifo_num; i++) {
2369 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2370 		struct fifo_info *fifo = &mac_control->fifos[i];
2371 		unsigned long flags;
2372 
2373 		spin_lock_irqsave(&fifo->tx_lock, flags);
2374 		for (j = 0; j < tx_cfg->fifo_len; j++) {
2375 			txdp = fifo->list_info[j].list_virt_addr;
2376 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2377 			if (skb) {
2378 				swstats->mem_freed += skb->truesize;
2379 				dev_kfree_skb(skb);
2380 				cnt++;
2381 			}
2382 		}
2383 		DBG_PRINT(INTR_DBG,
2384 			  "%s: forcibly freeing %d skbs on FIFO%d\n",
2385 			  dev->name, cnt, i);
2386 		fifo->tx_curr_get_info.offset = 0;
2387 		fifo->tx_curr_put_info.offset = 0;
2388 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
2389 	}
2390 }
2391 
/**
 *   stop_nic - To stop the nic
 *   @nic : device private variable.
 *   Description:
 *   This function does exactly the opposite of what the start_nic()
 *   function does. This function is called to stop the device.
 *   Return Value:
 *   void.
 */
2401 
2402 static void stop_nic(struct s2io_nic *nic)
2403 {
2404 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2405 	register u64 val64 = 0;
2406 	u16 interruptible;
2407 
2408 	/*  Disable all interrupts */
2409 	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2410 	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2411 	interruptible |= TX_PIC_INTR;
2412 	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2413 
2414 	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2415 	val64 = readq(&bar0->adapter_control);
2416 	val64 &= ~(ADAPTER_CNTL_EN);
2417 	writeq(val64, &bar0->adapter_control);
2418 }
2419 
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic: device private variable
 *  @ring: per ring structure
 *  @from_card_up: If this is true, we will map the buffer to get
 *     the dma address for buf0 and buf1 to give it to the card.
 *     Else we will sync the already mapped buffer to give it to the card.
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 header and
 *  L4 payload in three buffer mode, and in five buffer mode the L4
 *  payload itself is split into 3 fragments. As of now only the single
 *  and two buffer modes are supported.
 *  Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */
2442 static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2443 			   int from_card_up)
2444 {
2445 	struct sk_buff *skb;
2446 	struct RxD_t *rxdp;
2447 	int off, size, block_no, block_no1;
2448 	u32 alloc_tab = 0;
2449 	u32 alloc_cnt;
2450 	u64 tmp;
2451 	struct buffAdd *ba;
2452 	struct RxD_t *first_rxdp = NULL;
2453 	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2454 	struct RxD1 *rxdp1;
2455 	struct RxD3 *rxdp3;
2456 	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2457 
2458 	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2459 
2460 	block_no1 = ring->rx_curr_get_info.block_index;
2461 	while (alloc_tab < alloc_cnt) {
2462 		block_no = ring->rx_curr_put_info.block_index;
2463 
2464 		off = ring->rx_curr_put_info.offset;
2465 
2466 		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2467 
2468 		if ((block_no == block_no1) &&
2469 		    (off == ring->rx_curr_get_info.offset) &&
2470 		    (rxdp->Host_Control)) {
2471 			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2472 				  ring->dev->name);
2473 			goto end;
2474 		}
2475 		if (off && (off == ring->rxd_count)) {
2476 			ring->rx_curr_put_info.block_index++;
2477 			if (ring->rx_curr_put_info.block_index ==
2478 			    ring->block_count)
2479 				ring->rx_curr_put_info.block_index = 0;
2480 			block_no = ring->rx_curr_put_info.block_index;
2481 			off = 0;
2482 			ring->rx_curr_put_info.offset = off;
2483 			rxdp = ring->rx_blocks[block_no].block_virt_addr;
2484 			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2485 				  ring->dev->name, rxdp);
2486 
2487 		}
2488 
2489 		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2490 		    ((ring->rxd_mode == RXD_MODE_3B) &&
2491 		     (rxdp->Control_2 & s2BIT(0)))) {
2492 			ring->rx_curr_put_info.offset = off;
2493 			goto end;
2494 		}
2495 		/* calculate size of skb based on ring mode */
2496 		size = ring->mtu +
2497 			HEADER_ETHERNET_II_802_3_SIZE +
2498 			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2499 		if (ring->rxd_mode == RXD_MODE_1)
2500 			size += NET_IP_ALIGN;
2501 		else
2502 			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2503 
2504 		/* allocate skb */
2505 		skb = netdev_alloc_skb(nic->dev, size);
2506 		if (!skb) {
2507 			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2508 				  ring->dev->name);
2509 			if (first_rxdp) {
2510 				dma_wmb();
2511 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2512 			}
2513 			swstats->mem_alloc_fail_cnt++;
2514 
2515 			return -ENOMEM ;
2516 		}
2517 		swstats->mem_allocated += skb->truesize;
2518 
2519 		if (ring->rxd_mode == RXD_MODE_1) {
2520 			/* 1 buffer mode - normal operation mode */
2521 			rxdp1 = (struct RxD1 *)rxdp;
2522 			memset(rxdp, 0, sizeof(struct RxD1));
2523 			skb_reserve(skb, NET_IP_ALIGN);
2524 			rxdp1->Buffer0_ptr =
2525 				pci_map_single(ring->pdev, skb->data,
2526 					       size - NET_IP_ALIGN,
2527 					       PCI_DMA_FROMDEVICE);
2528 			if (pci_dma_mapping_error(nic->pdev,
2529 						  rxdp1->Buffer0_ptr))
2530 				goto pci_map_failed;
2531 
2532 			rxdp->Control_2 =
2533 				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2534 			rxdp->Host_Control = (unsigned long)skb;
2535 		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode provides 128-byte aligned
			 * receive buffers.
			 */
2541 
2542 			rxdp3 = (struct RxD3 *)rxdp;
2543 			/* save buffer pointers to avoid frequent dma mapping */
2544 			Buffer0_ptr = rxdp3->Buffer0_ptr;
2545 			Buffer1_ptr = rxdp3->Buffer1_ptr;
2546 			memset(rxdp, 0, sizeof(struct RxD3));
2547 			/* restore the buffer pointers for dma sync*/
2548 			rxdp3->Buffer0_ptr = Buffer0_ptr;
2549 			rxdp3->Buffer1_ptr = Buffer1_ptr;
2550 
2551 			ba = &ring->ba[block_no][off];
2552 			skb_reserve(skb, BUF0_LEN);
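			/*
			 * Round skb->data up to the next alignment boundary
			 * (ALIGN_SIZE is the required alignment minus one,
			 * so adding it and then masking it off rounds up).
			 */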
2553 			tmp = (u64)(unsigned long)skb->data;
2554 			tmp += ALIGN_SIZE;
2555 			tmp &= ~ALIGN_SIZE;
2556 			skb->data = (void *) (unsigned long)tmp;
2557 			skb_reset_tail_pointer(skb);
2558 
2559 			if (from_card_up) {
2560 				rxdp3->Buffer0_ptr =
2561 					pci_map_single(ring->pdev, ba->ba_0,
2562 						       BUF0_LEN,
2563 						       PCI_DMA_FROMDEVICE);
2564 				if (pci_dma_mapping_error(nic->pdev,
2565 							  rxdp3->Buffer0_ptr))
2566 					goto pci_map_failed;
2567 			} else
2568 				pci_dma_sync_single_for_device(ring->pdev,
2569 							       (dma_addr_t)rxdp3->Buffer0_ptr,
2570 							       BUF0_LEN,
2571 							       PCI_DMA_FROMDEVICE);
2572 
2573 			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2574 			if (ring->rxd_mode == RXD_MODE_3B) {
2575 				/* Two buffer mode */
2576 
2577 				/*
2578 				 * Buffer2 will have L3/L4 header plus
2579 				 * L4 payload
2580 				 */
2581 				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
2582 								    skb->data,
2583 								    ring->mtu + 4,
2584 								    PCI_DMA_FROMDEVICE);
2585 
2586 				if (pci_dma_mapping_error(nic->pdev,
2587 							  rxdp3->Buffer2_ptr))
2588 					goto pci_map_failed;
2589 
2590 				if (from_card_up) {
2591 					rxdp3->Buffer1_ptr =
2592 						pci_map_single(ring->pdev,
2593 							       ba->ba_1,
2594 							       BUF1_LEN,
2595 							       PCI_DMA_FROMDEVICE);
2596 
2597 					if (pci_dma_mapping_error(nic->pdev,
2598 								  rxdp3->Buffer1_ptr)) {
2599 						pci_unmap_single(ring->pdev,
2600 								 (dma_addr_t)(unsigned long)
2601 								 skb->data,
2602 								 ring->mtu + 4,
2603 								 PCI_DMA_FROMDEVICE);
2604 						goto pci_map_failed;
2605 					}
2606 				}
2607 				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2608 				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2609 					(ring->mtu + 4);
2610 			}
2611 			rxdp->Control_2 |= s2BIT(0);
2612 			rxdp->Host_Control = (unsigned long) (skb);
2613 		}
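		/*
		 * Hand most descriptors to the NIC immediately; every
		 * (1 << rxsync_frequency)-th descriptor is instead
		 * remembered in first_rxdp and its ownership flipped
		 * later, after a dma_wmb(), so the adapter never sees a
		 * partially initialized batch.
		 */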
2614 		if (alloc_tab & ((1 << rxsync_frequency) - 1))
2615 			rxdp->Control_1 |= RXD_OWN_XENA;
2616 		off++;
2617 		if (off == (ring->rxd_count + 1))
2618 			off = 0;
2619 		ring->rx_curr_put_info.offset = off;
2620 
2621 		rxdp->Control_2 |= SET_RXD_MARKER;
2622 		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2623 			if (first_rxdp) {
2624 				dma_wmb();
2625 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2626 			}
2627 			first_rxdp = rxdp;
2628 		}
2629 		ring->rx_bufs_left += 1;
2630 		alloc_tab++;
2631 	}
2632 
2633 end:
2634 	/* Transfer ownership of first descriptor to adapter just before
2635 	 * exiting. Before that, use memory barrier so that ownership
2636 	 * and other fields are seen by adapter correctly.
2637 	 */
2638 	if (first_rxdp) {
2639 		dma_wmb();
2640 		first_rxdp->Control_1 |= RXD_OWN_XENA;
2641 	}
2642 
2643 	return SUCCESS;
2644 
2645 pci_map_failed:
2646 	swstats->pci_map_fail_cnt++;
2647 	swstats->mem_freed += skb->truesize;
2648 	dev_kfree_skb_irq(skb);
2649 	return -ENOMEM;
2650 }
2651 
2652 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2653 {
2654 	struct net_device *dev = sp->dev;
2655 	int j;
2656 	struct sk_buff *skb;
2657 	struct RxD_t *rxdp;
2658 	struct RxD1 *rxdp1;
2659 	struct RxD3 *rxdp3;
2660 	struct mac_info *mac_control = &sp->mac_control;
2661 	struct stat_block *stats = mac_control->stats_info;
2662 	struct swStat *swstats = &stats->sw_stat;
2663 
2664 	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2665 		rxdp = mac_control->rings[ring_no].
2666 			rx_blocks[blk].rxds[j].virt_addr;
2667 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2668 		if (!skb)
2669 			continue;
2670 		if (sp->rxd_mode == RXD_MODE_1) {
2671 			rxdp1 = (struct RxD1 *)rxdp;
2672 			pci_unmap_single(sp->pdev,
2673 					 (dma_addr_t)rxdp1->Buffer0_ptr,
2674 					 dev->mtu +
2675 					 HEADER_ETHERNET_II_802_3_SIZE +
2676 					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2677 					 PCI_DMA_FROMDEVICE);
2678 			memset(rxdp, 0, sizeof(struct RxD1));
2679 		} else if (sp->rxd_mode == RXD_MODE_3B) {
2680 			rxdp3 = (struct RxD3 *)rxdp;
2681 			pci_unmap_single(sp->pdev,
2682 					 (dma_addr_t)rxdp3->Buffer0_ptr,
2683 					 BUF0_LEN,
2684 					 PCI_DMA_FROMDEVICE);
2685 			pci_unmap_single(sp->pdev,
2686 					 (dma_addr_t)rxdp3->Buffer1_ptr,
2687 					 BUF1_LEN,
2688 					 PCI_DMA_FROMDEVICE);
2689 			pci_unmap_single(sp->pdev,
2690 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2691 					 dev->mtu + 4,
2692 					 PCI_DMA_FROMDEVICE);
2693 			memset(rxdp, 0, sizeof(struct RxD3));
2694 		}
2695 		swstats->mem_freed += skb->truesize;
2696 		dev_kfree_skb(skb);
2697 		mac_control->rings[ring_no].rx_bufs_left -= 1;
2698 	}
2699 }
2700 
2701 /**
2702  *  free_rx_buffers - Frees all Rx buffers
2703  *  @sp: device private variable.
2704  *  Description:
2705  *  This function will free all Rx buffers allocated by host.
2706  *  Return Value:
2707  *  NONE.
2708  */
2709 
2710 static void free_rx_buffers(struct s2io_nic *sp)
2711 {
2712 	struct net_device *dev = sp->dev;
2713 	int i, blk = 0, buf_cnt = 0;
2714 	struct config_param *config = &sp->config;
2715 	struct mac_info *mac_control = &sp->mac_control;
2716 
2717 	for (i = 0; i < config->rx_ring_num; i++) {
2718 		struct ring_info *ring = &mac_control->rings[i];
2719 
2720 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2721 			free_rxd_blk(sp, i, blk);
2722 
2723 		ring->rx_curr_put_info.block_index = 0;
2724 		ring->rx_curr_get_info.block_index = 0;
2725 		ring->rx_curr_put_info.offset = 0;
2726 		ring->rx_curr_get_info.offset = 0;
2727 		ring->rx_bufs_left = 0;
2728 		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2729 			  dev->name, buf_cnt, i);
2730 	}
2731 }
2732 
2733 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2734 {
2735 	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2736 		DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2737 			  ring->dev->name);
2738 	}
2739 	return 0;
2740 }
2741 
/**
 * s2io_poll_msix - Rx interrupt handler for NAPI support
 * @napi : pointer to the napi structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'poll' function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt
 * context; it also processes only a given number of packets.
 * Return value:
 * No. of Rx packets processed.
 */
2754 
2755 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2756 {
2757 	struct ring_info *ring = container_of(napi, struct ring_info, napi);
2758 	struct net_device *dev = ring->dev;
2759 	int pkts_processed = 0;
2760 	u8 __iomem *addr = NULL;
2761 	u8 val8 = 0;
2762 	struct s2io_nic *nic = netdev_priv(dev);
2763 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2764 	int budget_org = budget;
2765 
2766 	if (unlikely(!is_s2io_card_up(nic)))
2767 		return 0;
2768 
2769 	pkts_processed = rx_intr_handler(ring, budget);
2770 	s2io_chk_rx_buffers(nic, ring);
2771 
2772 	if (pkts_processed < budget_org) {
2773 		napi_complete_done(napi, pkts_processed);
2774 		/*Re Enable MSI-Rx Vector*/
2775 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2776 		addr += 7 - ring->ring_no;
2777 		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2778 		writeb(val8, addr);
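		/* read back to flush the posted MMIO write */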
2779 		val8 = readb(addr);
2780 	}
2781 	return pkts_processed;
2782 }
2783 
2784 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2785 {
2786 	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2787 	int pkts_processed = 0;
2788 	int ring_pkts_processed, i;
2789 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2790 	int budget_org = budget;
2791 	struct config_param *config = &nic->config;
2792 	struct mac_info *mac_control = &nic->mac_control;
2793 
2794 	if (unlikely(!is_s2io_card_up(nic)))
2795 		return 0;
2796 
2797 	for (i = 0; i < config->rx_ring_num; i++) {
2798 		struct ring_info *ring = &mac_control->rings[i];
2799 		ring_pkts_processed = rx_intr_handler(ring, budget);
2800 		s2io_chk_rx_buffers(nic, ring);
2801 		pkts_processed += ring_pkts_processed;
2802 		budget -= ring_pkts_processed;
2803 		if (budget <= 0)
2804 			break;
2805 	}
2806 	if (pkts_processed < budget_org) {
2807 		napi_complete_done(napi, pkts_processed);
2808 		/* Re enable the Rx interrupts for the ring */
2809 		writeq(0, &bar0->rx_traffic_mask);
2810 		readl(&bar0->rx_traffic_mask);
2811 	}
2812 	return pkts_processed;
2813 }
2814 
2815 #ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * 	This function will be called by the upper layer to check for events
 * on the interface in situations where interrupts are disabled. It is
 * used for specific in-kernel networking tasks, such as remote consoles
 * and kernel debugging over the network (for example, netdump in Red Hat).
 */
2825 static void s2io_netpoll(struct net_device *dev)
2826 {
2827 	struct s2io_nic *nic = netdev_priv(dev);
2828 	const int irq = nic->pdev->irq;
2829 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2830 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2831 	int i;
2832 	struct config_param *config = &nic->config;
2833 	struct mac_info *mac_control = &nic->mac_control;
2834 
2835 	if (pci_channel_offline(nic->pdev))
2836 		return;
2837 
2838 	disable_irq(irq);
2839 
2840 	writeq(val64, &bar0->rx_traffic_int);
2841 	writeq(val64, &bar0->tx_traffic_int);
2842 
	/* We need to free up the transmitted skbs, or else netpoll will
	 * run out of skbs and fail, and eventually the netpoll application
	 * (such as netdump) will fail.
	 */
2847 	for (i = 0; i < config->tx_fifo_num; i++)
2848 		tx_intr_handler(&mac_control->fifos[i]);
2849 
2850 	/* check for received packet and indicate up to network */
2851 	for (i = 0; i < config->rx_ring_num; i++) {
2852 		struct ring_info *ring = &mac_control->rings[i];
2853 
2854 		rx_intr_handler(ring, 0);
2855 	}
2856 
2857 	for (i = 0; i < config->rx_ring_num; i++) {
2858 		struct ring_info *ring = &mac_control->rings[i];
2859 
2860 		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2861 			DBG_PRINT(INFO_DBG,
2862 				  "%s: Out of memory in Rx Netpoll!!\n",
2863 				  dev->name);
2864 			break;
2865 		}
2866 	}
2867 	enable_irq(irq);
2868 }
2869 #endif
2870 
/**
 *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per ring structure.
 *  @budget: budget for napi processing.
 *  Description:
 *  If the interrupt is because of a received frame, or if the
 *  receive ring contains fresh, as yet unprocessed frames, this function
 *  is called. It picks out the RxD at which place the last Rx processing
 *  had stopped and sends the skb to the OSM's Rx handler and then
 *  increments the offset.
 *  Return Value:
 *  No. of napi packets processed.
 */
2884 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2885 {
2886 	int get_block, put_block;
2887 	struct rx_curr_get_info get_info, put_info;
2888 	struct RxD_t *rxdp;
2889 	struct sk_buff *skb;
2890 	int pkt_cnt = 0, napi_pkts = 0;
2891 	int i;
2892 	struct RxD1 *rxdp1;
2893 	struct RxD3 *rxdp3;
2894 
2895 	if (budget <= 0)
2896 		return napi_pkts;
2897 
2898 	get_info = ring_data->rx_curr_get_info;
2899 	get_block = get_info.block_index;
2900 	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2901 	put_block = put_info.block_index;
2902 	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2903 
2904 	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If we are next to the put index then it's a
		 * ring full condition.
		 */
2909 		if ((get_block == put_block) &&
2910 		    (get_info.offset + 1) == put_info.offset) {
2911 			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2912 				  ring_data->dev->name);
2913 			break;
2914 		}
2915 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2916 		if (skb == NULL) {
2917 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2918 				  ring_data->dev->name);
2919 			return 0;
2920 		}
2921 		if (ring_data->rxd_mode == RXD_MODE_1) {
2922 			rxdp1 = (struct RxD1 *)rxdp;
2923 			pci_unmap_single(ring_data->pdev, (dma_addr_t)
2924 					 rxdp1->Buffer0_ptr,
2925 					 ring_data->mtu +
2926 					 HEADER_ETHERNET_II_802_3_SIZE +
2927 					 HEADER_802_2_SIZE +
2928 					 HEADER_SNAP_SIZE,
2929 					 PCI_DMA_FROMDEVICE);
2930 		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
2931 			rxdp3 = (struct RxD3 *)rxdp;
2932 			pci_dma_sync_single_for_cpu(ring_data->pdev,
2933 						    (dma_addr_t)rxdp3->Buffer0_ptr,
2934 						    BUF0_LEN,
2935 						    PCI_DMA_FROMDEVICE);
2936 			pci_unmap_single(ring_data->pdev,
2937 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2938 					 ring_data->mtu + 4,
2939 					 PCI_DMA_FROMDEVICE);
2940 		}
2941 		prefetch(skb->data);
2942 		rx_osm_handler(ring_data, rxdp);
2943 		get_info.offset++;
2944 		ring_data->rx_curr_get_info.offset = get_info.offset;
2945 		rxdp = ring_data->rx_blocks[get_block].
2946 			rxds[get_info.offset].virt_addr;
2947 		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2948 			get_info.offset = 0;
2949 			ring_data->rx_curr_get_info.offset = get_info.offset;
2950 			get_block++;
2951 			if (get_block == ring_data->block_count)
2952 				get_block = 0;
2953 			ring_data->rx_curr_get_info.block_index = get_block;
2954 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2955 		}
2956 
2957 		if (ring_data->nic->config.napi) {
2958 			budget--;
2959 			napi_pkts++;
2960 			if (!budget)
2961 				break;
2962 		}
2963 		pkt_cnt++;
2964 		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2965 			break;
2966 	}
2967 	if (ring_data->lro) {
2968 		/* Clear all LRO sessions before exiting */
2969 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2970 			struct lro *lro = &ring_data->lro0_n[i];
2971 			if (lro->in_use) {
2972 				update_L3L4_header(ring_data->nic, lro);
2973 				queue_rx_frame(lro->parent, lro->vlan_tag);
2974 				clear_lro_session(lro);
2975 			}
2976 		}
2977 	}
2978 	return napi_pkts;
2979 }
2980 
/**
 *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data : fifo data structure of the fifo being serviced
 *  Description:
 *  If an interrupt was raised to indicate DMA complete of the
 *  Tx packet, this function is called. It identifies the last TxD
 *  whose buffer was freed and frees all skbs whose data have already
 *  been DMA'd into the NIC's internal memory.
 *  Return Value:
 *  NONE
 */
2992 
2993 static void tx_intr_handler(struct fifo_info *fifo_data)
2994 {
2995 	struct s2io_nic *nic = fifo_data->nic;
2996 	struct tx_curr_get_info get_info, put_info;
2997 	struct sk_buff *skb = NULL;
2998 	struct TxD *txdlp;
2999 	int pkt_cnt = 0;
3000 	unsigned long flags = 0;
3001 	u8 err_mask;
3002 	struct stat_block *stats = nic->mac_control.stats_info;
3003 	struct swStat *swstats = &stats->sw_stat;
3004 
3005 	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3006 		return;
3007 
3008 	get_info = fifo_data->tx_curr_get_info;
3009 	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3010 	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3011 	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3012 	       (get_info.offset != put_info.offset) &&
3013 	       (txdlp->Host_Control)) {
3014 		/* Check for TxD errors */
3015 		if (txdlp->Control_1 & TXD_T_CODE) {
3016 			unsigned long long err;
3017 			err = txdlp->Control_1 & TXD_T_CODE;
3018 			if (err & 0x1) {
3019 				swstats->parity_err_cnt++;
3020 			}
3021 
3022 			/* update t_code statistics */
3023 			err_mask = err >> 48;
3024 			switch (err_mask) {
3025 			case 2:
3026 				swstats->tx_buf_abort_cnt++;
3027 				break;
3028 
3029 			case 3:
3030 				swstats->tx_desc_abort_cnt++;
3031 				break;
3032 
3033 			case 7:
3034 				swstats->tx_parity_err_cnt++;
3035 				break;
3036 
3037 			case 10:
3038 				swstats->tx_link_loss_cnt++;
3039 				break;
3040 
3041 			case 15:
3042 				swstats->tx_list_proc_err_cnt++;
3043 				break;
3044 			}
3045 		}
3046 
3047 		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3048 		if (skb == NULL) {
3049 			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3050 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3051 				  __func__);
3052 			return;
3053 		}
3054 		pkt_cnt++;
3055 
3056 		/* Updating the statistics block */
3057 		swstats->mem_freed += skb->truesize;
3058 		dev_consume_skb_irq(skb);
3059 
3060 		get_info.offset++;
3061 		if (get_info.offset == get_info.fifo_len + 1)
3062 			get_info.offset = 0;
3063 		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3064 		fifo_data->tx_curr_get_info.offset = get_info.offset;
3065 	}
3066 
3067 	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3068 
3069 	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3070 }
3071 
/**
 *  s2io_mdio_write - Function to write to the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @value    : data value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to write values to the MDIO registers.
 *  Return Value:
 *  NONE
 */
3082 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3083 			    struct net_device *dev)
3084 {
3085 	u64 val64;
3086 	struct s2io_nic *sp = netdev_priv(dev);
3087 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3088 
3089 	/* address transaction */
3090 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3091 		MDIO_MMD_DEV_ADDR(mmd_type) |
3092 		MDIO_MMS_PRT_ADDR(0x0);
3093 	writeq(val64, &bar0->mdio_control);
3094 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3095 	writeq(val64, &bar0->mdio_control);
3096 	udelay(100);
3097 
3098 	/* Data transaction */
3099 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3100 		MDIO_MMD_DEV_ADDR(mmd_type) |
3101 		MDIO_MMS_PRT_ADDR(0x0) |
3102 		MDIO_MDIO_DATA(value) |
3103 		MDIO_OP(MDIO_OP_WRITE_TRANS);
3104 	writeq(val64, &bar0->mdio_control);
3105 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3106 	writeq(val64, &bar0->mdio_control);
3107 	udelay(100);
3108 
3109 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3110 		MDIO_MMD_DEV_ADDR(mmd_type) |
3111 		MDIO_MMS_PRT_ADDR(0x0) |
3112 		MDIO_OP(MDIO_OP_READ_TRANS);
3113 	writeq(val64, &bar0->mdio_control);
3114 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3115 	writeq(val64, &bar0->mdio_control);
3116 	udelay(100);
3117 }
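
/*
 * Example (illustrative only): s2io_updt_xpak_counter() below loads the
 * XPAK DOM register at address 0xA100 of the PMA/PMD MMD and reads the
 * result back:
 *
 *	s2io_mdio_write(MDIO_MMD_PMAPMD, 0xA100, 0x0, dev);
 *	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, 0xA100, dev);
 */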
3118 
/**
 *  s2io_mdio_read - Function to read from the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers.
 *  Return Value:
 *  The value read from the register.
 */
3128 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3129 {
3130 	u64 val64 = 0x0;
3131 	u64 rval64 = 0x0;
3132 	struct s2io_nic *sp = netdev_priv(dev);
3133 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3134 
3135 	/* address transaction */
3136 	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3137 			 | MDIO_MMD_DEV_ADDR(mmd_type)
3138 			 | MDIO_MMS_PRT_ADDR(0x0));
3139 	writeq(val64, &bar0->mdio_control);
3140 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3141 	writeq(val64, &bar0->mdio_control);
3142 	udelay(100);
3143 
3144 	/* Data transaction */
3145 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3146 		MDIO_MMD_DEV_ADDR(mmd_type) |
3147 		MDIO_MMS_PRT_ADDR(0x0) |
3148 		MDIO_OP(MDIO_OP_READ_TRANS);
3149 	writeq(val64, &bar0->mdio_control);
3150 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3151 	writeq(val64, &bar0->mdio_control);
3152 	udelay(100);
3153 
3154 	/* Read the value from regs */
3155 	rval64 = readq(&bar0->mdio_control);
3156 	rval64 = rval64 & 0xFFFF0000;
3157 	rval64 = rval64 >> 16;
3158 	return rval64;
3159 }
3160 
/**
 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter   : counter value to be updated
 *  @regs_stat : pointer to the register holding the 2-bit status fields
 *  @index     : index of the 2-bit field belonging to this alarm
 *  @flag      : flag to indicate the status
 *  @type      : counter type
 *  Description:
 *  This function checks the status of the xpak counters.
 *  Return Value:
 *  NONE
 */
3170 
3171 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3172 				  u16 flag, u16 type)
3173 {
3174 	u64 mask = 0x3;
3175 	u64 val64;
3176 	int i;
3177 	for (i = 0; i < index; i++)
3178 		mask = mask << 0x2;
3179 
3180 	if (flag > 0) {
3181 		*counter = *counter + 1;
3182 		val64 = *regs_stat & mask;
3183 		val64 = val64 >> (index * 0x2);
3184 		val64 = val64 + 1;
3185 		if (val64 == 3) {
3186 			switch (type) {
3187 			case 1:
3188 				DBG_PRINT(ERR_DBG,
3189 					  "Take Xframe NIC out of service.\n");
3190 				DBG_PRINT(ERR_DBG,
3191 "Excessive temperatures may result in premature transceiver failure.\n");
3192 				break;
3193 			case 2:
3194 				DBG_PRINT(ERR_DBG,
3195 					  "Take Xframe NIC out of service.\n");
3196 				DBG_PRINT(ERR_DBG,
3197 "Excessive bias currents may indicate imminent laser diode failure.\n");
3198 				break;
3199 			case 3:
3200 				DBG_PRINT(ERR_DBG,
3201 					  "Take Xframe NIC out of service.\n");
3202 				DBG_PRINT(ERR_DBG,
3203 "Excessive laser output power may saturate far-end receiver.\n");
3204 				break;
3205 			default:
3206 				DBG_PRINT(ERR_DBG,
3207 					  "Incorrect XPAK Alarm type\n");
3208 			}
3209 			val64 = 0x0;
3210 		}
3211 		val64 = val64 << (index * 0x2);
3212 		*regs_stat = (*regs_stat & (~mask)) | (val64);
3213 
3214 	} else {
3215 		*regs_stat = *regs_stat & (~mask);
3216 	}
3217 }
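
/*
 * Note on the encoding above: each alarm owns a 2-bit field in
 * *regs_stat, selected by @index. While the alarm flag stays set the
 * field is incremented once per poll; on the third consecutive poll the
 * warning is printed and the field wraps to 0. When the flag clears,
 * the field is reset.
 */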
3218 
/**
 *  s2io_updt_xpak_counter - Function to update the xpak counters
 *  @dev         : pointer to net_device struct
 *  Description:
 *  This function is to update the status of the xpak counters value
 *  Return Value:
 *  NONE
 */
3226 static void s2io_updt_xpak_counter(struct net_device *dev)
3227 {
3228 	u16 flag  = 0x0;
3229 	u16 type  = 0x0;
3230 	u16 val16 = 0x0;
3231 	u64 val64 = 0x0;
3232 	u64 addr  = 0x0;
3233 
3234 	struct s2io_nic *sp = netdev_priv(dev);
3235 	struct stat_block *stats = sp->mac_control.stats_info;
3236 	struct xpakStat *xstats = &stats->xpak_stat;
3237 
3238 	/* Check the communication with the MDIO slave */
3239 	addr = MDIO_CTRL1;
3240 	val64 = 0x0;
3241 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3242 	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3243 		DBG_PRINT(ERR_DBG,
3244 			  "ERR: MDIO slave access failed - Returned %llx\n",
3245 			  (unsigned long long)val64);
3246 		return;
3247 	}
3248 
3249 	/* Check for the expected value of control reg 1 */
3250 	if (val64 != MDIO_CTRL1_SPEED10G) {
3251 		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
			  "Returned: %llx - Expected: 0x%x\n",
3253 			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3254 		return;
3255 	}
3256 
3257 	/* Loading the DOM register to MDIO register */
3258 	addr = 0xA100;
3259 	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3260 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3261 
3262 	/* Reading the Alarm flags */
3263 	addr = 0xA070;
3264 	val64 = 0x0;
3265 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3266 
3267 	flag = CHECKBIT(val64, 0x7);
3268 	type = 1;
3269 	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3270 			      &xstats->xpak_regs_stat,
3271 			      0x0, flag, type);
3272 
3273 	if (CHECKBIT(val64, 0x6))
3274 		xstats->alarm_transceiver_temp_low++;
3275 
3276 	flag = CHECKBIT(val64, 0x3);
3277 	type = 2;
3278 	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3279 			      &xstats->xpak_regs_stat,
3280 			      0x2, flag, type);
3281 
3282 	if (CHECKBIT(val64, 0x2))
3283 		xstats->alarm_laser_bias_current_low++;
3284 
3285 	flag = CHECKBIT(val64, 0x1);
3286 	type = 3;
3287 	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3288 			      &xstats->xpak_regs_stat,
3289 			      0x4, flag, type);
3290 
3291 	if (CHECKBIT(val64, 0x0))
3292 		xstats->alarm_laser_output_power_low++;
3293 
3294 	/* Reading the Warning flags */
3295 	addr = 0xA074;
3296 	val64 = 0x0;
3297 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3298 
3299 	if (CHECKBIT(val64, 0x7))
3300 		xstats->warn_transceiver_temp_high++;
3301 
3302 	if (CHECKBIT(val64, 0x6))
3303 		xstats->warn_transceiver_temp_low++;
3304 
3305 	if (CHECKBIT(val64, 0x3))
3306 		xstats->warn_laser_bias_current_high++;
3307 
3308 	if (CHECKBIT(val64, 0x2))
3309 		xstats->warn_laser_bias_current_low++;
3310 
3311 	if (CHECKBIT(val64, 0x1))
3312 		xstats->warn_laser_output_power_high++;
3313 
3314 	if (CHECKBIT(val64, 0x0))
3315 		xstats->warn_laser_output_power_low++;
3316 }
3317 
/**
 *  wait_for_cmd_complete - waits for a command to complete.
 *  @addr : address of the register to poll.
 *  @busy_bit : bit mask indicating that the command is still executing.
 *  @bit_state : S2IO_BIT_RESET to wait for the bit to clear,
 *  S2IO_BIT_SET to wait for it to be set.
 *  Description: Function that waits for a command written to the RMAC
 *  ADDR DATA registers to be completed and returns either success or
 *  error depending on whether the command was complete or not.
 *  Return value:
 *   SUCCESS on success and FAILURE on failure.
 */
3328 
3329 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3330 				 int bit_state)
3331 {
3332 	int ret = FAILURE, cnt = 0, delay = 1;
3333 	u64 val64;
3334 
3335 	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3336 		return FAILURE;
3337 
3338 	do {
3339 		val64 = readq(addr);
3340 		if (bit_state == S2IO_BIT_RESET) {
3341 			if (!(val64 & busy_bit)) {
3342 				ret = SUCCESS;
3343 				break;
3344 			}
3345 		} else {
3346 			if (val64 & busy_bit) {
3347 				ret = SUCCESS;
3348 				break;
3349 			}
3350 		}
3351 
3352 		if (in_interrupt())
3353 			mdelay(delay);
3354 		else
3355 			msleep(delay);
3356 
3357 		if (++cnt >= 10)
3358 			delay = 50;
3359 	} while (cnt < 20);
3360 	return ret;
3361 }
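
/*
 * Typical usage (illustrative sketch): poll the busy/strobe bit of a
 * command register until the adapter finishes executing the command, e.g.
 *
 *	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
 *				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
 *				  S2IO_BIT_RESET) != SUCCESS)
 *		return FAILURE;
 */
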
3362 /**
3363  * check_pci_device_id - Checks if the device id is supported
3364  * @id : device id
3365  * Description: Function to check if the pci device id is supported by driver.
3366  * Return value: Actual device id if supported else PCI_ANY_ID
3367  */
3368 static u16 check_pci_device_id(u16 id)
3369 {
3370 	switch (id) {
3371 	case PCI_DEVICE_ID_HERC_WIN:
3372 	case PCI_DEVICE_ID_HERC_UNI:
3373 		return XFRAME_II_DEVICE;
3374 	case PCI_DEVICE_ID_S2IO_UNI:
3375 	case PCI_DEVICE_ID_S2IO_WIN:
3376 		return XFRAME_I_DEVICE;
3377 	default:
3378 		return PCI_ANY_ID;
3379 	}
3380 }
3381 
3382 /**
3383  *  s2io_reset - Resets the card.
3384  *  @sp : private member of the device structure.
3385  *  Description: Function to Reset the card. This function then also
3386  *  restores the previously saved PCI configuration space registers as
3387  *  the card reset also resets the configuration space.
3388  *  Return value:
3389  *  void.
3390  */
3391 
3392 static void s2io_reset(struct s2io_nic *sp)
3393 {
3394 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3395 	u64 val64;
3396 	u16 subid, pci_cmd;
3397 	int i;
3398 	u16 val16;
3399 	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3400 	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3401 	struct stat_block *stats;
3402 	struct swStat *swstats;
3403 
3404 	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3405 		  __func__, pci_name(sp->pdev));
3406 
	/* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
3408 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3409 
3410 	val64 = SW_RESET_ALL;
3411 	writeq(val64, &bar0->sw_reset);
3412 	if (strstr(sp->product_name, "CX4"))
3413 		msleep(750);
3414 	msleep(250);
3415 	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3416 
3417 		/* Restore the PCI state saved during initialization. */
3418 		pci_restore_state(sp->pdev);
3419 		pci_save_state(sp->pdev);
3420 		pci_read_config_word(sp->pdev, 0x2, &val16);
3421 		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3422 			break;
3423 		msleep(200);
3424 	}
3425 
3426 	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3427 		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3428 
3429 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3430 
3431 	s2io_init_pci(sp);
3432 
3433 	/* Set swapper to enable I/O register access */
3434 	s2io_set_swapper(sp);
3435 
3436 	/* restore mac_addr entries */
3437 	do_s2io_restore_unicast_mc(sp);
3438 
3439 	/* Restore the MSIX table entries from local variables */
3440 	restore_xmsi_data(sp);
3441 
3442 	/* Clear certain PCI/PCI-X fields after reset */
3443 	if (sp->device_type == XFRAME_II_DEVICE) {
3444 		/* Clear "detected parity error" bit */
3445 		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3446 
3447 		/* Clearing PCIX Ecc status register */
3448 		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3449 
3450 		/* Clearing PCI_STATUS error reflected here */
3451 		writeq(s2BIT(62), &bar0->txpic_int_reg);
3452 	}
3453 
3454 	/* Reset device statistics maintained by OS */
3455 	memset(&sp->stats, 0, sizeof(struct net_device_stats));
3456 
3457 	stats = sp->mac_control.stats_info;
3458 	swstats = &stats->sw_stat;
3459 
3460 	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3461 	up_cnt = swstats->link_up_cnt;
3462 	down_cnt = swstats->link_down_cnt;
3463 	up_time = swstats->link_up_time;
3464 	down_time = swstats->link_down_time;
3465 	reset_cnt = swstats->soft_reset_cnt;
3466 	mem_alloc_cnt = swstats->mem_allocated;
3467 	mem_free_cnt = swstats->mem_freed;
3468 	watchdog_cnt = swstats->watchdog_timer_cnt;
3469 
3470 	memset(stats, 0, sizeof(struct stat_block));
3471 
3472 	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3473 	swstats->link_up_cnt = up_cnt;
3474 	swstats->link_down_cnt = down_cnt;
3475 	swstats->link_up_time = up_time;
3476 	swstats->link_down_time = down_time;
3477 	swstats->soft_reset_cnt = reset_cnt;
3478 	swstats->mem_allocated = mem_alloc_cnt;
3479 	swstats->mem_freed = mem_free_cnt;
3480 	swstats->watchdog_timer_cnt = watchdog_cnt;
3481 
3482 	/* SXE-002: Configure link and activity LED to turn it off */
3483 	subid = sp->pdev->subsystem_device;
3484 	if (((subid & 0xFF) >= 0x07) &&
3485 	    (sp->device_type == XFRAME_I_DEVICE)) {
3486 		val64 = readq(&bar0->gpio_control);
3487 		val64 |= 0x0000800000000000ULL;
3488 		writeq(val64, &bar0->gpio_control);
3489 		val64 = 0x0411040400000000ULL;
3490 		writeq(val64, (void __iomem *)bar0 + 0x2700);
3491 	}
3492 
3493 	/*
3494 	 * Clear spurious ECC interrupts that would have occurred on
3495 	 * XFRAME II cards after reset.
3496 	 */
3497 	if (sp->device_type == XFRAME_II_DEVICE) {
3498 		val64 = readq(&bar0->pcc_err_reg);
3499 		writeq(val64, &bar0->pcc_err_reg);
3500 	}
3501 
3502 	sp->device_enabled_once = false;
3503 }
3504 
3505 /**
3506  *  s2io_set_swapper - to set the swapper control on the card
3507  *  @sp : private member of the device structure,
3508  *  pointer to the s2io_nic structure.
3509  *  Description: Function to set the swapper control on the card
3510  *  correctly depending on the 'endianness' of the system.
3511  *  Return value:
3512  *  SUCCESS on success and FAILURE on failure.
3513  */
3514 
3515 static int s2io_set_swapper(struct s2io_nic *sp)
3516 {
3517 	struct net_device *dev = sp->dev;
3518 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3519 	u64 val64, valt, valr;
3520 
3521 	/*
3522 	 * Set proper endian settings and verify the same by reading
3523 	 * the PIF Feed-back register.
3524 	 */
3525 
3526 	val64 = readq(&bar0->pif_rd_swapper_fb);
3527 	if (val64 != 0x0123456789ABCDEFULL) {
3528 		int i = 0;
3529 		static const u64 value[] = {
3530 			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
3531 			0x8100008181000081ULL,	/* FE=1, SE=0 */
3532 			0x4200004242000042ULL,	/* FE=0, SE=1 */
3533 			0			/* FE=0, SE=0 */
3534 		};
3535 
3536 		while (i < 4) {
3537 			writeq(value[i], &bar0->swapper_ctrl);
3538 			val64 = readq(&bar0->pif_rd_swapper_fb);
3539 			if (val64 == 0x0123456789ABCDEFULL)
3540 				break;
3541 			i++;
3542 		}
3543 		if (i == 4) {
3544 			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3545 				  "feedback read %llx\n",
3546 				  dev->name, (unsigned long long)val64);
3547 			return FAILURE;
3548 		}
3549 		valr = value[i];
3550 	} else {
3551 		valr = readq(&bar0->swapper_ctrl);
3552 	}
3553 
3554 	valt = 0x0123456789ABCDEFULL;
3555 	writeq(valt, &bar0->xmsi_address);
3556 	val64 = readq(&bar0->xmsi_address);
3557 
3558 	if (val64 != valt) {
3559 		int i = 0;
3560 		static const u64 value[] = {
3561 			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
3562 			0x0081810000818100ULL,	/* FE=1, SE=0 */
3563 			0x0042420000424200ULL,	/* FE=0, SE=1 */
3564 			0			/* FE=0, SE=0 */
3565 		};
3566 
3567 		while (i < 4) {
3568 			writeq((value[i] | valr), &bar0->swapper_ctrl);
3569 			writeq(valt, &bar0->xmsi_address);
3570 			val64 = readq(&bar0->xmsi_address);
3571 			if (val64 == valt)
3572 				break;
3573 			i++;
3574 		}
3575 		if (i == 4) {
3576 			unsigned long long x = val64;
3577 			DBG_PRINT(ERR_DBG,
3578 				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
3579 			return FAILURE;
3580 		}
3581 	}
3582 	val64 = readq(&bar0->swapper_ctrl);
3583 	val64 &= 0xFFFF000000000000ULL;
3584 
3585 #ifdef __BIG_ENDIAN
3586 	/*
3587 	 * The device defaults to a big endian format, so a big
3588 	 * endian driver need not set anything.
3589 	 */
3590 	val64 |= (SWAPPER_CTRL_TXP_FE |
3591 		  SWAPPER_CTRL_TXP_SE |
3592 		  SWAPPER_CTRL_TXD_R_FE |
3593 		  SWAPPER_CTRL_TXD_W_FE |
3594 		  SWAPPER_CTRL_TXF_R_FE |
3595 		  SWAPPER_CTRL_RXD_R_FE |
3596 		  SWAPPER_CTRL_RXD_W_FE |
3597 		  SWAPPER_CTRL_RXF_W_FE |
3598 		  SWAPPER_CTRL_XMSI_FE |
3599 		  SWAPPER_CTRL_STATS_FE |
3600 		  SWAPPER_CTRL_STATS_SE);
3601 	if (sp->config.intr_type == INTA)
3602 		val64 |= SWAPPER_CTRL_XMSI_SE;
3603 	writeq(val64, &bar0->swapper_ctrl);
3604 #else
3605 	/*
3606 	 * Initially we enable all bits to make it accessible by the
3607 	 * driver, then we selectively enable only those bits that
3608 	 * we want to set.
3609 	 */
3610 	val64 |= (SWAPPER_CTRL_TXP_FE |
3611 		  SWAPPER_CTRL_TXP_SE |
3612 		  SWAPPER_CTRL_TXD_R_FE |
3613 		  SWAPPER_CTRL_TXD_R_SE |
3614 		  SWAPPER_CTRL_TXD_W_FE |
3615 		  SWAPPER_CTRL_TXD_W_SE |
3616 		  SWAPPER_CTRL_TXF_R_FE |
3617 		  SWAPPER_CTRL_RXD_R_FE |
3618 		  SWAPPER_CTRL_RXD_R_SE |
3619 		  SWAPPER_CTRL_RXD_W_FE |
3620 		  SWAPPER_CTRL_RXD_W_SE |
3621 		  SWAPPER_CTRL_RXF_W_FE |
3622 		  SWAPPER_CTRL_XMSI_FE |
3623 		  SWAPPER_CTRL_STATS_FE |
3624 		  SWAPPER_CTRL_STATS_SE);
3625 	if (sp->config.intr_type == INTA)
3626 		val64 |= SWAPPER_CTRL_XMSI_SE;
3627 	writeq(val64, &bar0->swapper_ctrl);
3628 #endif
3629 	val64 = readq(&bar0->swapper_ctrl);
3630 
3631 	/*
3632 	 * Verifying if endian settings are accurate by reading a
3633 	 * feedback register.
3634 	 */
3635 	val64 = readq(&bar0->pif_rd_swapper_fb);
3636 	if (val64 != 0x0123456789ABCDEFULL) {
3637 		/* Endian settings are incorrect, calls for another look. */
3638 		DBG_PRINT(ERR_DBG,
3639 			  "%s: Endian settings are wrong, feedback read %llx\n",
3640 			  dev->name, (unsigned long long)val64);
3641 		return FAILURE;
3642 	}
3643 
3644 	return SUCCESS;
3645 }
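
/*
 * Note on the feedback constant: a correctly programmed swapper makes
 * pif_rd_swapper_fb read back as 0x0123456789ABCDEF, while a fully
 * byte-swapped read would appear as 0xEFCDAB8967452301.  This is why
 * the loops above walk the four FE/SE combinations until the known
 * pattern is read back unchanged.
 */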
3646 
3647 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3648 {
3649 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3650 	u64 val64;
3651 	int ret = 0, cnt = 0;
3652 
3653 	do {
3654 		val64 = readq(&bar0->xmsi_access);
3655 		if (!(val64 & s2BIT(15)))
3656 			break;
3657 		mdelay(1);
3658 		cnt++;
3659 	} while (cnt < 5);
3660 	if (cnt == 5) {
3661 		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3662 		ret = 1;
3663 	}
3664 
3665 	return ret;
3666 }
3667 
3668 static void restore_xmsi_data(struct s2io_nic *nic)
3669 {
3670 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3671 	u64 val64;
3672 	int i, msix_index;
3673 
3674 	if (nic->device_type == XFRAME_I_DEVICE)
3675 		return;
3676 
3677 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3678 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3679 		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3680 		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3681 		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3682 		writeq(val64, &bar0->xmsi_access);
3683 		if (wait_for_msix_trans(nic, msix_index))
3684 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3685 				  __func__, msix_index);
3686 	}
3687 }
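
/*
 * Index mapping used above and in store_xmsi_data(): software entry 0
 * is the alarm vector at XMSI table index 0, and software entry i
 * (i >= 1) is a ring vector at table index (i - 1) * 8 + 1, so entries
 * 0, 1, 2, 3, ... map to table indices 0, 1, 9, 17, ...
 */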
3688 
3689 static void store_xmsi_data(struct s2io_nic *nic)
3690 {
3691 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3692 	u64 val64, addr, data;
3693 	int i, msix_index;
3694 
3695 	if (nic->device_type == XFRAME_I_DEVICE)
3696 		return;
3697 
3698 	/* Store the MSI-X address/data pairs */
3699 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3700 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3701 		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3702 		writeq(val64, &bar0->xmsi_access);
3703 		if (wait_for_msix_trans(nic, msix_index)) {
3704 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3705 				  __func__, msix_index);
3706 			continue;
3707 		}
3708 		addr = readq(&bar0->xmsi_address);
3709 		data = readq(&bar0->xmsi_data);
3710 		if (addr && data) {
3711 			nic->msix_info[i].addr = addr;
3712 			nic->msix_info[i].data = data;
3713 		}
3714 	}
3715 }
3716 
3717 static int s2io_enable_msi_x(struct s2io_nic *nic)
3718 {
3719 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3720 	u64 rx_mat;
3721 	u16 msi_control; /* Temp variable */
3722 	int ret, i, j, msix_indx = 1;
3723 	int size;
3724 	struct stat_block *stats = nic->mac_control.stats_info;
3725 	struct swStat *swstats = &stats->sw_stat;
3726 
3727 	size = nic->num_entries * sizeof(struct msix_entry);
3728 	nic->entries = kzalloc(size, GFP_KERNEL);
3729 	if (!nic->entries) {
3730 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3731 			  __func__);
3732 		swstats->mem_alloc_fail_cnt++;
3733 		return -ENOMEM;
3734 	}
3735 	swstats->mem_allocated += size;
3736 
3737 	size = nic->num_entries * sizeof(struct s2io_msix_entry);
3738 	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3739 	if (!nic->s2io_entries) {
3740 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3741 			  __func__);
3742 		swstats->mem_alloc_fail_cnt++;
3743 		kfree(nic->entries);
3744 		swstats->mem_freed
3745 			+= (nic->num_entries * sizeof(struct msix_entry));
3746 		return -ENOMEM;
3747 	}
3748 	swstats->mem_allocated += size;
3749 
3750 	nic->entries[0].entry = 0;
3751 	nic->s2io_entries[0].entry = 0;
3752 	nic->s2io_entries[0].in_use = MSIX_FLG;
3753 	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3754 	nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3755 
3756 	for (i = 1; i < nic->num_entries; i++) {
3757 		nic->entries[i].entry = ((i - 1) * 8) + 1;
3758 		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3759 		nic->s2io_entries[i].arg = NULL;
3760 		nic->s2io_entries[i].in_use = 0;
3761 	}
3762 
3763 	rx_mat = readq(&bar0->rx_mat);
3764 	for (j = 0; j < nic->config.rx_ring_num; j++) {
3765 		rx_mat |= RX_MAT_SET(j, msix_indx);
3766 		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3767 		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3768 		nic->s2io_entries[j+1].in_use = MSIX_FLG;
3769 		msix_indx += 8;
3770 	}
3771 	writeq(rx_mat, &bar0->rx_mat);
3772 	readq(&bar0->rx_mat);
3773 
3774 	ret = pci_enable_msix_range(nic->pdev, nic->entries,
3775 				    nic->num_entries, nic->num_entries);
3776 	/* We fail init on error or if we get fewer vectors than the minimum required */
3777 	if (ret < 0) {
3778 		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3779 		kfree(nic->entries);
3780 		swstats->mem_freed += nic->num_entries *
3781 			sizeof(struct msix_entry);
3782 		kfree(nic->s2io_entries);
3783 		swstats->mem_freed += nic->num_entries *
3784 			sizeof(struct s2io_msix_entry);
3785 		nic->entries = NULL;
3786 		nic->s2io_entries = NULL;
3787 		return -ENOMEM;
3788 	}
3789 
3790 	/*
3791 	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3792 	 * in the herc NIC. (Temp change, needs to be removed later)
3793 	 */
3794 	pci_read_config_word(nic->pdev, 0x42, &msi_control);
3795 	msi_control |= 0x1; /* Enable MSI */
3796 	pci_write_config_word(nic->pdev, 0x42, msi_control);
3797 
3798 	return 0;
3799 }
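
/*
 * Worked example (assuming num_entries == rx_ring_num + 1, as set up on
 * the probe path): with two Rx rings, table index 0 carries the
 * alarm/Tx-FIFO interrupt and indices 1 and 9 carry rings 0 and 1.
 * rx_mat is programmed so that ring j raises MSI-X table index
 * 8 * j + 1, matching the s2io_entries[] layout built above.
 */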
3800 
3801 /* Handle software interrupt used during MSI(X) test */
3802 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3803 {
3804 	struct s2io_nic *sp = dev_id;
3805 
3806 	sp->msi_detected = 1;
3807 	wake_up(&sp->msi_wait);
3808 
3809 	return IRQ_HANDLED;
3810 }
3811 
3812 /* Test the interrupt path by forcing a software IRQ */
3813 static int s2io_test_msi(struct s2io_nic *sp)
3814 {
3815 	struct pci_dev *pdev = sp->pdev;
3816 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3817 	int err;
3818 	u64 val64, saved64;
3819 
3820 	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3821 			  sp->name, sp);
3822 	if (err) {
3823 		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3824 			  sp->dev->name, pci_name(pdev), pdev->irq);
3825 		return err;
3826 	}
3827 
3828 	init_waitqueue_head(&sp->msi_wait);
3829 	sp->msi_detected = 0;
3830 
3831 	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3832 	val64 |= SCHED_INT_CTRL_ONE_SHOT;
3833 	val64 |= SCHED_INT_CTRL_TIMER_EN;
3834 	val64 |= SCHED_INT_CTRL_INT2MSI(1);
3835 	writeq(val64, &bar0->scheduled_int_ctrl);
3836 
3837 	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3838 
3839 	if (!sp->msi_detected) {
3840 		/* MSI(X) test failed, go back to INTx mode */
3841 		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3842 			  "using MSI(X) during test\n",
3843 			  sp->dev->name, pci_name(pdev));
3844 
3845 		err = -EOPNOTSUPP;
3846 	}
3847 
3848 	free_irq(sp->entries[1].vector, sp);
3849 
3850 	writeq(saved64, &bar0->scheduled_int_ctrl);
3851 
3852 	return err;
3853 }
3854 
3855 static void remove_msix_isr(struct s2io_nic *sp)
3856 {
3857 	int i;
3858 	u16 msi_control;
3859 
3860 	for (i = 0; i < sp->num_entries; i++) {
3861 		if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3862 			int vector = sp->entries[i].vector;
3863 			void *arg = sp->s2io_entries[i].arg;
3864 			free_irq(vector, arg);
3865 		}
3866 	}
3867 
3868 	kfree(sp->entries);
3869 	kfree(sp->s2io_entries);
3870 	sp->entries = NULL;
3871 	sp->s2io_entries = NULL;
3872 
3873 	pci_read_config_word(sp->pdev, 0x42, &msi_control);
3874 	msi_control &= 0xFFFE; /* Disable MSI */
3875 	pci_write_config_word(sp->pdev, 0x42, msi_control);
3876 
3877 	pci_disable_msix(sp->pdev);
3878 }
3879 
3880 static void remove_inta_isr(struct s2io_nic *sp)
3881 {
3882 	free_irq(sp->pdev->irq, sp->dev);
3883 }
3884 
3885 /* ********************************************************* *
3886  * Functions defined below concern the OS part of the driver *
3887  * ********************************************************* */
3888 
3889 /**
3890  *  s2io_open - open entry point of the driver
3891  *  @dev : pointer to the device structure.
3892  *  Description:
3893  *  This function is the open entry point of the driver. It mainly calls a
3894  *  function to allocate Rx buffers and inserts them into the buffer
3895  *  descriptors and then enables the Rx part of the NIC.
3896  *  Return value:
3897  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3898  *   file on failure.
3899  */
3900 
3901 static int s2io_open(struct net_device *dev)
3902 {
3903 	struct s2io_nic *sp = netdev_priv(dev);
3904 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3905 	int err = 0;
3906 
3907 	/*
3908 	 * Make sure you have link off by default every time
3909 	 * Nic is initialized
3910 	 */
3911 	netif_carrier_off(dev);
3912 	sp->last_link_state = 0;
3913 
3914 	/* Initialize H/W and enable interrupts */
3915 	err = s2io_card_up(sp);
3916 	if (err) {
3917 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3918 			  dev->name);
3919 		goto hw_init_failed;
3920 	}
3921 
3922 	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3923 		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3924 		s2io_card_down(sp);
3925 		err = -ENODEV;
3926 		goto hw_init_failed;
3927 	}
3928 	s2io_start_all_tx_queue(sp);
3929 	return 0;
3930 
3931 hw_init_failed:
3932 	if (sp->config.intr_type == MSI_X) {
3933 		if (sp->entries) {
3934 			kfree(sp->entries);
3935 			swstats->mem_freed += sp->num_entries *
3936 				sizeof(struct msix_entry);
3937 		}
3938 		if (sp->s2io_entries) {
3939 			kfree(sp->s2io_entries);
3940 			swstats->mem_freed += sp->num_entries *
3941 				sizeof(struct s2io_msix_entry);
3942 		}
3943 	}
3944 	return err;
3945 }
3946 
3947 /**
3948  *  s2io_close -close entry point of the driver
3949  *  @dev : device pointer.
3950  *  Description:
3951  *  This is the stop entry point of the driver. It needs to undo exactly
3952  *  whatever was done by the open entry point, thus it's usually referred to
3953  *  as the close function. Among other things this function mainly stops the
3954  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3955  *  Return value:
3956  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3957  *  file on failure.
3958  */
3959 
3960 static int s2io_close(struct net_device *dev)
3961 {
3962 	struct s2io_nic *sp = netdev_priv(dev);
3963 	struct config_param *config = &sp->config;
3964 	u64 tmp64;
3965 	int offset;
3966 
3967 	/* Return if the device is already closed.
3968 	 * Can happen when s2io_card_up failed in change_mtu.
3969 	 */
3970 	if (!is_s2io_card_up(sp))
3971 		return 0;
3972 
3973 	s2io_stop_all_tx_queue(sp);
3974 	/* delete all populated mac entries */
3975 	for (offset = 1; offset < config->max_mc_addr; offset++) {
3976 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
3977 		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3978 			do_s2io_delete_unicast_mc(sp, tmp64);
3979 	}
3980 
3981 	s2io_card_down(sp);
3982 
3983 	return 0;
3984 }
3985 
3986 /**
3987  *  s2io_xmit - Tx entry point of the driver
3988  *  @skb : the socket buffer containing the Tx data.
3989  *  @dev : device pointer.
3990  *  Description :
3991  *  This function is the Tx entry point of the driver. S2IO NIC supports
3992  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
3993  *  NOTE: when the device can't queue the packet, the trans_start
3994  *  variable is simply not updated.
3995  *  Return value:
3996  *  NETDEV_TX_OK on success, NETDEV_TX_BUSY when the queue is stopped.
3997  */
3998 
3999 static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4000 {
4001 	struct s2io_nic *sp = netdev_priv(dev);
4002 	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4003 	register u64 val64;
4004 	struct TxD *txdp;
4005 	struct TxFIFO_element __iomem *tx_fifo;
4006 	unsigned long flags = 0;
4007 	u16 vlan_tag = 0;
4008 	struct fifo_info *fifo = NULL;
4009 	int offload_type;
4010 	int enable_per_list_interrupt = 0;
4011 	struct config_param *config = &sp->config;
4012 	struct mac_info *mac_control = &sp->mac_control;
4013 	struct stat_block *stats = mac_control->stats_info;
4014 	struct swStat *swstats = &stats->sw_stat;
4015 
4016 	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4017 
4018 	if (unlikely(skb->len <= 0)) {
4019 		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4020 		dev_kfree_skb_any(skb);
4021 		return NETDEV_TX_OK;
4022 	}
4023 
4024 	if (!is_s2io_card_up(sp)) {
4025 		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4026 			  dev->name);
4027 		dev_kfree_skb_any(skb);
4028 		return NETDEV_TX_OK;
4029 	}
4030 
4031 	queue = 0;
4032 	if (skb_vlan_tag_present(skb))
4033 		vlan_tag = skb_vlan_tag_get(skb);
4034 	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4035 		if (skb->protocol == htons(ETH_P_IP)) {
4036 			struct iphdr *ip;
4037 			struct tcphdr *th;
4038 			ip = ip_hdr(skb);
4039 
4040 			if (!ip_is_fragment(ip)) {
4041 				th = (struct tcphdr *)(((unsigned char *)ip) +
4042 						       ip->ihl*4);
4043 
4044 				if (ip->protocol == IPPROTO_TCP) {
4045 					queue_len = sp->total_tcp_fifos;
4046 					queue = (ntohs(th->source) +
4047 						 ntohs(th->dest)) &
4048 						sp->fifo_selector[queue_len - 1];
4049 					if (queue >= queue_len)
4050 						queue = queue_len - 1;
4051 				} else if (ip->protocol == IPPROTO_UDP) {
4052 					queue_len = sp->total_udp_fifos;
4053 					queue = (ntohs(th->source) +
4054 						 ntohs(th->dest)) &
4055 						sp->fifo_selector[queue_len - 1];
4056 					if (queue >= queue_len)
4057 						queue = queue_len - 1;
4058 					queue += sp->udp_fifo_idx;
4059 					if (skb->len > 1024)
4060 						enable_per_list_interrupt = 1;
4061 				}
4062 			}
4063 		}
4064 	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4065 		/* get fifo number based on skb->priority value */
4066 		queue = config->fifo_mapping
4067 			[skb->priority & (MAX_TX_FIFOS - 1)];
4068 	fifo = &mac_control->fifos[queue];
4069 
4070 	spin_lock_irqsave(&fifo->tx_lock, flags);
4071 
4072 	if (sp->config.multiq) {
4073 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4074 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4075 			return NETDEV_TX_BUSY;
4076 		}
4077 	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4078 		if (netif_queue_stopped(dev)) {
4079 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4080 			return NETDEV_TX_BUSY;
4081 		}
4082 	}
4083 
4084 	put_off = (u16)fifo->tx_curr_put_info.offset;
4085 	get_off = (u16)fifo->tx_curr_get_info.offset;
4086 	txdp = fifo->list_info[put_off].list_virt_addr;
4087 
4088 	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4089 	/* Avoid "put" pointer going beyond "get" pointer */
4090 	if (txdp->Host_Control ||
4091 	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4092 		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4093 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4094 		dev_kfree_skb_any(skb);
4095 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
4096 		return NETDEV_TX_OK;
4097 	}
4098 
4099 	offload_type = s2io_offload_type(skb);
4100 	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4101 		txdp->Control_1 |= TXD_TCP_LSO_EN;
4102 		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4103 	}
4104 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
4105 		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4106 				    TXD_TX_CKO_TCP_EN |
4107 				    TXD_TX_CKO_UDP_EN);
4108 	}
4109 	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4110 	txdp->Control_1 |= TXD_LIST_OWN_XENA;
4111 	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4112 	if (enable_per_list_interrupt)
4113 		if (put_off & (queue_len >> 5))
4114 			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4115 	if (vlan_tag) {
4116 		txdp->Control_2 |= TXD_VLAN_ENABLE;
4117 		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4118 	}
4119 
4120 	frg_len = skb_headlen(skb);
4121 	txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4122 					      frg_len, PCI_DMA_TODEVICE);
4123 	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4124 		goto pci_map_failed;
4125 
4126 	txdp->Host_Control = (unsigned long)skb;
4127 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4128 
4129 	frg_cnt = skb_shinfo(skb)->nr_frags;
4130 	/* For fragmented SKB. */
4131 	for (i = 0; i < frg_cnt; i++) {
4132 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4133 		/* A '0' length fragment will be ignored */
4134 		if (!skb_frag_size(frag))
4135 			continue;
4136 		txdp++;
4137 		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4138 							     frag, 0,
4139 							     skb_frag_size(frag),
4140 							     DMA_TO_DEVICE);
4141 		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4142 	}
4143 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4144 
4145 	tx_fifo = mac_control->tx_FIFO_start[queue];
4146 	val64 = fifo->list_info[put_off].list_phy_addr;
4147 	writeq(val64, &tx_fifo->TxDL_Pointer);
4148 
4149 	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4150 		 TX_FIFO_LAST_LIST);
4151 	if (offload_type)
4152 		val64 |= TX_FIFO_SPECIAL_FUNC;
4153 
4154 	writeq(val64, &tx_fifo->List_Control);
4155 
4156 	put_off++;
4157 	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4158 		put_off = 0;
4159 	fifo->tx_curr_put_info.offset = put_off;
4160 
4161 	/* Avoid "put" pointer going beyond "get" pointer */
4162 	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4163 		swstats->fifo_full_cnt++;
4164 		DBG_PRINT(TX_DBG,
4165 			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4166 			  put_off, get_off);
4167 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4168 	}
4169 	swstats->mem_allocated += skb->truesize;
4170 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4171 
4172 	if (sp->config.intr_type == MSI_X)
4173 		tx_intr_handler(fifo);
4174 
4175 	return NETDEV_TX_OK;
4176 
4177 pci_map_failed:
4178 	swstats->pci_map_fail_cnt++;
4179 	s2io_stop_tx_queue(sp, fifo->fifo_no);
4180 	swstats->mem_freed += skb->truesize;
4181 	dev_kfree_skb_any(skb);
4182 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4183 	return NETDEV_TX_OK;
4184 }
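
/*
 * Illustrative sketch of the TxD ring-full test used in s2io_xmit():
 * with queue_len descriptors the ring counts as full when advancing
 * "put" would land on "get", keeping one slot unused:
 *
 *	u16 next = (put_off + 1 == queue_len) ? 0 : put_off + 1;
 *	bool full = (next == get_off);
 *
 * e.g. for queue_len = 4, put_off = 3, get_off = 0, "next" wraps to 0
 * and the queue is stopped before a live descriptor could be reused.
 */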
4185 
4186 static void
4187 s2io_alarm_handle(struct timer_list *t)
4188 {
4189 	struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
4190 	struct net_device *dev = sp->dev;
4191 
4192 	s2io_handle_errors(dev);
4193 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4194 }
4195 
4196 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4197 {
4198 	struct ring_info *ring = (struct ring_info *)dev_id;
4199 	struct s2io_nic *sp = ring->nic;
4200 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4201 
4202 	if (unlikely(!is_s2io_card_up(sp)))
4203 		return IRQ_HANDLED;
4204 
4205 	if (sp->config.napi) {
4206 		u8 __iomem *addr = NULL;
4207 		u8 val8 = 0;
4208 
4209 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4210 		addr += (7 - ring->ring_no);
4211 		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4212 		writeb(val8, addr);
4213 		val8 = readb(addr);
4214 		napi_schedule(&ring->napi);
4215 	} else {
4216 		rx_intr_handler(ring, 0);
4217 		s2io_chk_rx_buffers(sp, ring);
4218 	}
4219 
4220 	return IRQ_HANDLED;
4221 }
4222 
4223 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4224 {
4225 	int i;
4226 	struct fifo_info *fifos = (struct fifo_info *)dev_id;
4227 	struct s2io_nic *sp = fifos->nic;
4228 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4229 	struct config_param *config  = &sp->config;
4230 	u64 reason;
4231 
4232 	if (unlikely(!is_s2io_card_up(sp)))
4233 		return IRQ_NONE;
4234 
4235 	reason = readq(&bar0->general_int_status);
4236 	if (unlikely(reason == S2IO_MINUS_ONE))
4237 		/* Nothing much can be done. Get out */
4238 		return IRQ_HANDLED;
4239 
4240 	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4241 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4242 
4243 		if (reason & GEN_INTR_TXPIC)
4244 			s2io_txpic_intr_handle(sp);
4245 
4246 		if (reason & GEN_INTR_TXTRAFFIC)
4247 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4248 
4249 		for (i = 0; i < config->tx_fifo_num; i++)
4250 			tx_intr_handler(&fifos[i]);
4251 
4252 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4253 		readl(&bar0->general_int_status);
4254 		return IRQ_HANDLED;
4255 	}
4256 	/* The interrupt was not raised by us */
4257 	return IRQ_NONE;
4258 }
4259 
4260 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4261 {
4262 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4263 	u64 val64;
4264 
4265 	val64 = readq(&bar0->pic_int_status);
4266 	if (val64 & PIC_INT_GPIO) {
4267 		val64 = readq(&bar0->gpio_int_reg);
4268 		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4269 		    (val64 & GPIO_INT_REG_LINK_UP)) {
4270 			/*
4271 			 * This is an unstable state, so clear both up/down
4272 			 * interrupts and let the adapter re-evaluate the link state.
4273 			 */
4274 			val64 |= GPIO_INT_REG_LINK_DOWN;
4275 			val64 |= GPIO_INT_REG_LINK_UP;
4276 			writeq(val64, &bar0->gpio_int_reg);
4277 			val64 = readq(&bar0->gpio_int_mask);
4278 			val64 &= ~(GPIO_INT_MASK_LINK_UP |
4279 				   GPIO_INT_MASK_LINK_DOWN);
4280 			writeq(val64, &bar0->gpio_int_mask);
4281 		} else if (val64 & GPIO_INT_REG_LINK_UP) {
4282 			val64 = readq(&bar0->adapter_status);
4283 			/* Enable Adapter */
4284 			val64 = readq(&bar0->adapter_control);
4285 			val64 |= ADAPTER_CNTL_EN;
4286 			writeq(val64, &bar0->adapter_control);
4287 			val64 |= ADAPTER_LED_ON;
4288 			writeq(val64, &bar0->adapter_control);
4289 			if (!sp->device_enabled_once)
4290 				sp->device_enabled_once = true;
4291 
4292 			s2io_link(sp, LINK_UP);
4293 			/*
4294 			 * unmask link down interrupt and mask link-up
4295 			 * intr
4296 			 */
4297 			val64 = readq(&bar0->gpio_int_mask);
4298 			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4299 			val64 |= GPIO_INT_MASK_LINK_UP;
4300 			writeq(val64, &bar0->gpio_int_mask);
4301 
4302 		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4303 			val64 = readq(&bar0->adapter_status);
4304 			s2io_link(sp, LINK_DOWN);
4305 			/* Link is down, so unmask the link-up interrupt */
4306 			val64 = readq(&bar0->gpio_int_mask);
4307 			val64 &= ~GPIO_INT_MASK_LINK_UP;
4308 			val64 |= GPIO_INT_MASK_LINK_DOWN;
4309 			writeq(val64, &bar0->gpio_int_mask);
4310 
4311 			/* turn off LED */
4312 			val64 = readq(&bar0->adapter_control);
4313 			val64 = val64 & (~ADAPTER_LED_ON);
4314 			writeq(val64, &bar0->adapter_control);
4315 		}
4316 	}
4317 	val64 = readq(&bar0->gpio_int_mask);
4318 }
4319 
4320 /**
4321  *  do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4322  *  @value: alarm bits
4323  *  @addr: address of the alarm register
4324  *  @cnt: counter variable
4325  *  Description: Check for alarm and increment the counter
4326  *  Return Value:
4327  *  1 - if alarm bit set
4328  *  0 - if alarm bit is not set
4329  */
4330 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4331 				 unsigned long long *cnt)
4332 {
4333 	u64 val64;
4334 	val64 = readq(addr);
4335 	if (val64 & value) {
4336 		writeq(val64, addr);
4337 		(*cnt)++;
4338 		return 1;
4339 	}
4340 	return 0;
4342 }
4343 
4344 /**
4345  *  s2io_handle_errors - Xframe error indication handler
4346  *  @nic: device private variable
4347  *  Description: Handle alarms such as loss of link, single or
4348  *  double ECC errors, critical and serious errors.
4349  *  Return Value:
4350  *  NONE
4351  */
4352 static void s2io_handle_errors(void *dev_id)
4353 {
4354 	struct net_device *dev = (struct net_device *)dev_id;
4355 	struct s2io_nic *sp = netdev_priv(dev);
4356 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4357 	u64 temp64 = 0, val64 = 0;
4358 	int i = 0;
4359 
4360 	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4361 	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4362 
4363 	if (!is_s2io_card_up(sp))
4364 		return;
4365 
4366 	if (pci_channel_offline(sp->pdev))
4367 		return;
4368 
4369 	memset(&sw_stat->ring_full_cnt, 0,
4370 	       sizeof(sw_stat->ring_full_cnt));
4371 
4372 	/* Handling the XPAK counters update */
4373 	if (stats->xpak_timer_count < 72000) {
4374 		/* waiting for an hour */
4375 		stats->xpak_timer_count++;
4376 	} else {
4377 		s2io_updt_xpak_counter(dev);
4378 		/* reset the count to zero */
4379 		stats->xpak_timer_count = 0;
4380 	}
4381 
4382 	/* Handling link status change error Intr */
4383 	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4384 		val64 = readq(&bar0->mac_rmac_err_reg);
4385 		writeq(val64, &bar0->mac_rmac_err_reg);
4386 		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4387 			schedule_work(&sp->set_link_task);
4388 	}
4389 
4390 	/* In case of a serious error, the device will be Reset. */
4391 	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4392 				  &sw_stat->serious_err_cnt))
4393 		goto reset;
4394 
4395 	/* Check for data parity error */
4396 	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4397 				  &sw_stat->parity_err_cnt))
4398 		goto reset;
4399 
4400 	/* Check for ring full counter */
4401 	if (sp->device_type == XFRAME_II_DEVICE) {
4402 		val64 = readq(&bar0->ring_bump_counter1);
4403 		for (i = 0; i < 4; i++) {
4404 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4405 			temp64 >>= 64 - ((i+1)*16);
4406 			sw_stat->ring_full_cnt[i] += temp64;
4407 		}
4408 
4409 		val64 = readq(&bar0->ring_bump_counter2);
4410 		for (i = 0; i < 4; i++) {
4411 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4412 			temp64 >>= 64 - ((i+1)*16);
4413 			sw_stat->ring_full_cnt[i+4] += temp64;
4414 		}
4415 	}
4416 
4417 	val64 = readq(&bar0->txdma_int_status);
4418 	/* check for pfc_err */
4419 	if (val64 & TXDMA_PFC_INT) {
4420 		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4421 					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4422 					  PFC_PCIX_ERR,
4423 					  &bar0->pfc_err_reg,
4424 					  &sw_stat->pfc_err_cnt))
4425 			goto reset;
4426 		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4427 				      &bar0->pfc_err_reg,
4428 				      &sw_stat->pfc_err_cnt);
4429 	}
4430 
4431 	/* check for tda_err */
4432 	if (val64 & TXDMA_TDA_INT) {
4433 		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4434 					  TDA_SM0_ERR_ALARM |
4435 					  TDA_SM1_ERR_ALARM,
4436 					  &bar0->tda_err_reg,
4437 					  &sw_stat->tda_err_cnt))
4438 			goto reset;
4439 		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4440 				      &bar0->tda_err_reg,
4441 				      &sw_stat->tda_err_cnt);
4442 	}
4443 	/* check for pcc_err */
4444 	if (val64 & TXDMA_PCC_INT) {
4445 		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4446 					  PCC_N_SERR | PCC_6_COF_OV_ERR |
4447 					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4448 					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4449 					  PCC_TXB_ECC_DB_ERR,
4450 					  &bar0->pcc_err_reg,
4451 					  &sw_stat->pcc_err_cnt))
4452 			goto reset;
4453 		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4454 				      &bar0->pcc_err_reg,
4455 				      &sw_stat->pcc_err_cnt);
4456 	}
4457 
4458 	/* check for tti_err */
4459 	if (val64 & TXDMA_TTI_INT) {
4460 		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4461 					  &bar0->tti_err_reg,
4462 					  &sw_stat->tti_err_cnt))
4463 			goto reset;
4464 		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4465 				      &bar0->tti_err_reg,
4466 				      &sw_stat->tti_err_cnt);
4467 	}
4468 
4469 	/* check for lso_err */
4470 	if (val64 & TXDMA_LSO_INT) {
4471 		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4472 					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4473 					  &bar0->lso_err_reg,
4474 					  &sw_stat->lso_err_cnt))
4475 			goto reset;
4476 		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4477 				      &bar0->lso_err_reg,
4478 				      &sw_stat->lso_err_cnt);
4479 	}
4480 
4481 	/* check for tpa_err */
4482 	if (val64 & TXDMA_TPA_INT) {
4483 		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4484 					  &bar0->tpa_err_reg,
4485 					  &sw_stat->tpa_err_cnt))
4486 			goto reset;
4487 		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4488 				      &bar0->tpa_err_reg,
4489 				      &sw_stat->tpa_err_cnt);
4490 	}
4491 
4492 	/* check for sm_err */
4493 	if (val64 & TXDMA_SM_INT) {
4494 		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4495 					  &bar0->sm_err_reg,
4496 					  &sw_stat->sm_err_cnt))
4497 			goto reset;
4498 	}
4499 
4500 	val64 = readq(&bar0->mac_int_status);
4501 	if (val64 & MAC_INT_STATUS_TMAC_INT) {
4502 		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4503 					  &bar0->mac_tmac_err_reg,
4504 					  &sw_stat->mac_tmac_err_cnt))
4505 			goto reset;
4506 		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4507 				      TMAC_DESC_ECC_SG_ERR |
4508 				      TMAC_DESC_ECC_DB_ERR,
4509 				      &bar0->mac_tmac_err_reg,
4510 				      &sw_stat->mac_tmac_err_cnt);
4511 	}
4512 
4513 	val64 = readq(&bar0->xgxs_int_status);
4514 	if (val64 & XGXS_INT_STATUS_TXGXS) {
4515 		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4516 					  &bar0->xgxs_txgxs_err_reg,
4517 					  &sw_stat->xgxs_txgxs_err_cnt))
4518 			goto reset;
4519 		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4520 				      &bar0->xgxs_txgxs_err_reg,
4521 				      &sw_stat->xgxs_txgxs_err_cnt);
4522 	}
4523 
4524 	val64 = readq(&bar0->rxdma_int_status);
4525 	if (val64 & RXDMA_INT_RC_INT_M) {
4526 		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4527 					  RC_FTC_ECC_DB_ERR |
4528 					  RC_PRCn_SM_ERR_ALARM |
4529 					  RC_FTC_SM_ERR_ALARM,
4530 					  &bar0->rc_err_reg,
4531 					  &sw_stat->rc_err_cnt))
4532 			goto reset;
4533 		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4534 				      RC_FTC_ECC_SG_ERR |
4535 				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4536 				      &sw_stat->rc_err_cnt);
4537 		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4538 					  PRC_PCI_AB_WR_Rn |
4539 					  PRC_PCI_AB_F_WR_Rn,
4540 					  &bar0->prc_pcix_err_reg,
4541 					  &sw_stat->prc_pcix_err_cnt))
4542 			goto reset;
4543 		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4544 				      PRC_PCI_DP_WR_Rn |
4545 				      PRC_PCI_DP_F_WR_Rn,
4546 				      &bar0->prc_pcix_err_reg,
4547 				      &sw_stat->prc_pcix_err_cnt);
4548 	}
4549 
4550 	if (val64 & RXDMA_INT_RPA_INT_M) {
4551 		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4552 					  &bar0->rpa_err_reg,
4553 					  &sw_stat->rpa_err_cnt))
4554 			goto reset;
4555 		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4556 				      &bar0->rpa_err_reg,
4557 				      &sw_stat->rpa_err_cnt);
4558 	}
4559 
4560 	if (val64 & RXDMA_INT_RDA_INT_M) {
4561 		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4562 					  RDA_FRM_ECC_DB_N_AERR |
4563 					  RDA_SM1_ERR_ALARM |
4564 					  RDA_SM0_ERR_ALARM |
4565 					  RDA_RXD_ECC_DB_SERR,
4566 					  &bar0->rda_err_reg,
4567 					  &sw_stat->rda_err_cnt))
4568 			goto reset;
4569 		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4570 				      RDA_FRM_ECC_SG_ERR |
4571 				      RDA_MISC_ERR |
4572 				      RDA_PCIX_ERR,
4573 				      &bar0->rda_err_reg,
4574 				      &sw_stat->rda_err_cnt);
4575 	}
4576 
4577 	if (val64 & RXDMA_INT_RTI_INT_M) {
4578 		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4579 					  &bar0->rti_err_reg,
4580 					  &sw_stat->rti_err_cnt))
4581 			goto reset;
4582 		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4583 				      &bar0->rti_err_reg,
4584 				      &sw_stat->rti_err_cnt);
4585 	}
4586 
4587 	val64 = readq(&bar0->mac_int_status);
4588 	if (val64 & MAC_INT_STATUS_RMAC_INT) {
4589 		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4590 					  &bar0->mac_rmac_err_reg,
4591 					  &sw_stat->mac_rmac_err_cnt))
4592 			goto reset;
4593 		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4594 				      RMAC_SINGLE_ECC_ERR |
4595 				      RMAC_DOUBLE_ECC_ERR,
4596 				      &bar0->mac_rmac_err_reg,
4597 				      &sw_stat->mac_rmac_err_cnt);
4598 	}
4599 
4600 	val64 = readq(&bar0->xgxs_int_status);
4601 	if (val64 & XGXS_INT_STATUS_RXGXS) {
4602 		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4603 					  &bar0->xgxs_rxgxs_err_reg,
4604 					  &sw_stat->xgxs_rxgxs_err_cnt))
4605 			goto reset;
4606 	}
4607 
4608 	val64 = readq(&bar0->mc_int_status);
4609 	if (val64 & MC_INT_STATUS_MC_INT) {
4610 		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4611 					  &bar0->mc_err_reg,
4612 					  &sw_stat->mc_err_cnt))
4613 			goto reset;
4614 
4615 		/* Handle ECC errors */
4616 		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4617 			writeq(val64, &bar0->mc_err_reg);
4618 			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4619 				sw_stat->double_ecc_errs++;
4620 				if (sp->device_type != XFRAME_II_DEVICE) {
4621 					/*
4622 					 * Reset XframeI only if critical error
4623 					 */
4624 					if (val64 &
4625 					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4626 					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4627 						goto reset;
4628 				}
4629 			} else
4630 				sw_stat->single_ecc_errs++;
4631 		}
4632 	}
4633 	return;
4634 
4635 reset:
4636 	s2io_stop_all_tx_queue(sp);
4637 	schedule_work(&sp->rst_timer_task);
4638 	sw_stat->soft_reset_cnt++;
4639 }
4640 
4641 /**
4642  *  s2io_isr - ISR handler of the device.
4643  *  @irq: the irq of the device.
4644  *  @dev_id: a void pointer to the dev structure of the NIC.
4645  *  Description:  This function is the ISR handler of the device. It
4646  *  identifies the reason for the interrupt and calls the relevant
4647  *  service routines. As a contingency measure, this ISR allocates the
4648  *  recv buffers, if their numbers are below the panic value which is
4649  *  presently set to 25% of the original number of rcv buffers allocated.
4650  *  Return value:
4651  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4652  *   IRQ_NONE: will be returned if interrupt is not from our device
4653  */
4654 static irqreturn_t s2io_isr(int irq, void *dev_id)
4655 {
4656 	struct net_device *dev = (struct net_device *)dev_id;
4657 	struct s2io_nic *sp = netdev_priv(dev);
4658 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4659 	int i;
4660 	u64 reason = 0;
4661 	struct mac_info *mac_control;
4662 	struct config_param *config;
4663 
4664 	/* Pretend we handled any irqs from a disconnected card */
4665 	if (pci_channel_offline(sp->pdev))
4666 		return IRQ_NONE;
4667 
4668 	if (!is_s2io_card_up(sp))
4669 		return IRQ_NONE;
4670 
4671 	config = &sp->config;
4672 	mac_control = &sp->mac_control;
4673 
4674 	/*
4675 	 * Identify the cause for interrupt and call the appropriate
4676 	 * interrupt handler. Causes for the interrupt could be;
4677 	 * 1. Rx of packet.
4678 	 * 2. Tx complete.
4679 	 * 3. Link down.
4680 	 */
4681 	reason = readq(&bar0->general_int_status);
4682 
4683 	if (unlikely(reason == S2IO_MINUS_ONE))
4684 		return IRQ_HANDLED;	/* Nothing much can be done. Get out */
4685 
4686 	if (reason &
4687 	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4688 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4689 
4690 		if (config->napi) {
4691 			if (reason & GEN_INTR_RXTRAFFIC) {
4692 				napi_schedule(&sp->napi);
4693 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4694 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4695 				readl(&bar0->rx_traffic_int);
4696 			}
4697 		} else {
4698 			/*
4699 			 * rx_traffic_int reg is an R1 register, writing all 1's
4700 			 * will ensure that the actual interrupt causing bit
4701 			 * gets cleared and hence a read can be avoided.
4702 			 */
4703 			if (reason & GEN_INTR_RXTRAFFIC)
4704 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4705 
4706 			for (i = 0; i < config->rx_ring_num; i++) {
4707 				struct ring_info *ring = &mac_control->rings[i];
4708 
4709 				rx_intr_handler(ring, 0);
4710 			}
4711 		}
4712 
4713 		/*
4714 		 * tx_traffic_int reg is an R1 register, writing all 1's
4715 		 * will ensure that the actual interrupt causing bit gets
4716 		 * cleared and hence a read can be avoided.
4717 		 */
4718 		if (reason & GEN_INTR_TXTRAFFIC)
4719 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4720 
4721 		for (i = 0; i < config->tx_fifo_num; i++)
4722 			tx_intr_handler(&mac_control->fifos[i]);
4723 
4724 		if (reason & GEN_INTR_TXPIC)
4725 			s2io_txpic_intr_handle(sp);
4726 
4727 		/*
4728 		 * Reallocate the buffers from the interrupt handler itself.
4729 		 */
4730 		if (!config->napi) {
4731 			for (i = 0; i < config->rx_ring_num; i++) {
4732 				struct ring_info *ring = &mac_control->rings[i];
4733 
4734 				s2io_chk_rx_buffers(sp, ring);
4735 			}
4736 		}
4737 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4738 		readl(&bar0->general_int_status);
4739 
4740 		return IRQ_HANDLED;
4741 
4742 	} else if (!reason) {
4743 		/* The interrupt was not raised by us */
4744 		return IRQ_NONE;
4745 	}
4746 
4747 	return IRQ_HANDLED;
4748 }
4749 
4750 /**
4751  * s2io_updt_stats - triggers a one-shot update of the adapter statistics
4752  */
4753 static void s2io_updt_stats(struct s2io_nic *sp)
4754 {
4755 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4756 	u64 val64;
4757 	int cnt = 0;
4758 
4759 	if (is_s2io_card_up(sp)) {
4760 		/* Approx 30us on a 133 MHz bus */
4761 		val64 = SET_UPDT_CLICKS(10) |
4762 			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4763 		writeq(val64, &bar0->stat_cfg);
4764 		do {
4765 			udelay(100);
4766 			val64 = readq(&bar0->stat_cfg);
4767 			if (!(val64 & s2BIT(0)))
4768 				break;
4769 			cnt++;
4770 			if (cnt == 5)
4771 				break; /* Updt failed */
4772 		} while (1);
4773 	}
4774 }
4775 
4776 /**
4777  *  s2io_get_stats - Updates the device statistics structure.
4778  *  @dev : pointer to the device structure.
4779  *  Description:
4780  *  This function updates the device statistics structure in the s2io_nic
4781  *  structure and returns a pointer to the same.
4782  *  Return value:
4783  *  pointer to the updated net_device_stats structure.
4784  */
4785 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4786 {
4787 	struct s2io_nic *sp = netdev_priv(dev);
4788 	struct mac_info *mac_control = &sp->mac_control;
4789 	struct stat_block *stats = mac_control->stats_info;
4790 	u64 delta;
4791 
4792 	/* Configure stats for immediate update */
4793 	s2io_updt_stats(sp);
4794 
4795 	/* A device reset will cause the on-adapter statistics to be zeroed.
4796 	 * This can happen while running, e.g. when the MTU is changed.  To keep
4797 	 * the system from seeing the stats drop to zero, the driver keeps a copy
4798 	 * of the last update to the system (which is also zeroed on reset).  This
4799 	 * enables the driver to accurately know the delta between the last
4800 	 * update and the current update.
4801 	 */
4802 	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4803 		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4804 	sp->stats.rx_packets += delta;
4805 	dev->stats.rx_packets += delta;
4806 
4807 	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4808 		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4809 	sp->stats.tx_packets += delta;
4810 	dev->stats.tx_packets += delta;
4811 
4812 	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4813 		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4814 	sp->stats.rx_bytes += delta;
4815 	dev->stats.rx_bytes += delta;
4816 
4817 	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4818 		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4819 	sp->stats.tx_bytes += delta;
4820 	dev->stats.tx_bytes += delta;
4821 
4822 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4823 	sp->stats.rx_errors += delta;
4824 	dev->stats.rx_errors += delta;
4825 
4826 	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4827 		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4828 	sp->stats.tx_errors += delta;
4829 	dev->stats.tx_errors += delta;
4830 
4831 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4832 	sp->stats.rx_dropped += delta;
4833 	dev->stats.rx_dropped += delta;
4834 
4835 	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4836 	sp->stats.tx_dropped += delta;
4837 	dev->stats.tx_dropped += delta;
4838 
4839 	/* The adapter MAC interprets pause frames as multicast packets, but
4840 	 * does not pass them up.  This erroneously increases the multicast
4841 	 * packet count and needs to be deducted when the multicast frame count
4842 	 * is queried.
4843 	 */
4844 	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4845 		le32_to_cpu(stats->rmac_vld_mcst_frms);
4846 	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4847 	delta -= sp->stats.multicast;
4848 	sp->stats.multicast += delta;
4849 	dev->stats.multicast += delta;
4850 
4851 	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4852 		le32_to_cpu(stats->rmac_usized_frms)) +
4853 		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4854 	sp->stats.rx_length_errors += delta;
4855 	dev->stats.rx_length_errors += delta;
4856 
4857 	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4858 	sp->stats.rx_crc_errors += delta;
4859 	dev->stats.rx_crc_errors += delta;
4860 
4861 	return &dev->stats;
4862 }
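
/*
 * Sketch of the counter reconstruction used above: the MAC keeps each
 * frame counter as a 32-bit little-endian value plus a 32-bit overflow
 * word, so the running 64-bit total is assembled as
 *
 *	u64 total = (u64)le32_to_cpu(oflow) << 32 | le32_to_cpu(lo);
 *
 * and only the delta against the driver's last snapshot is folded into
 * dev->stats, which is how the totals survive the zeroing of the
 * on-adapter block in s2io_reset().
 */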
4863 
4864 /**
4865  *  s2io_set_multicast - entry point for multicast address enable/disable.
4866  *  @dev : pointer to the device structure
4867  *  Description:
4868  *  This function is a driver entry point which gets called by the kernel
4869  *  whenever multicast addresses must be enabled/disabled. This also gets
4870  *  called to set/reset promiscuous mode. Depending on the device flags, we
4871  *  determine whether multicast addresses must be enabled or whether
4872  *  promiscuous mode is to be disabled, etc.
4873  *  Return value:
4874  *  void.
4875  */
4876 
4877 static void s2io_set_multicast(struct net_device *dev)
4878 {
4879 	int i, j, prev_cnt;
4880 	struct netdev_hw_addr *ha;
4881 	struct s2io_nic *sp = netdev_priv(dev);
4882 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4883 	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4884 		0xfeffffffffffULL;
4885 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4886 	void __iomem *add;
4887 	struct config_param *config = &sp->config;
4888 
4889 	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4890 		/*  Enable all Multicast addresses */
4891 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4892 		       &bar0->rmac_addr_data0_mem);
4893 		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4894 		       &bar0->rmac_addr_data1_mem);
4895 		val64 = RMAC_ADDR_CMD_MEM_WE |
4896 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4897 			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4898 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4899 		/* Wait till command completes */
4900 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4901 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4902 				      S2IO_BIT_RESET);
4903 
4904 		sp->m_cast_flg = 1;
4905 		sp->all_multi_pos = config->max_mc_addr - 1;
4906 	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4907 		/*  Disable all Multicast addresses */
4908 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4909 		       &bar0->rmac_addr_data0_mem);
4910 		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4911 		       &bar0->rmac_addr_data1_mem);
4912 		val64 = RMAC_ADDR_CMD_MEM_WE |
4913 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4914 			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4915 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4916 		/* Wait till command completes */
4917 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4918 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4919 				      S2IO_BIT_RESET);
4920 
4921 		sp->m_cast_flg = 0;
4922 		sp->all_multi_pos = 0;
4923 	}
4924 
4925 	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4926 		/*  Put the NIC into promiscuous mode */
4927 		add = &bar0->mac_cfg;
4928 		val64 = readq(&bar0->mac_cfg);
4929 		val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4930 
4931 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4932 		writel((u32)val64, add);
4933 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4934 		writel((u32) (val64 >> 32), (add + 4));
4935 
4936 		if (vlan_tag_strip != 1) {
4937 			val64 = readq(&bar0->rx_pa_cfg);
4938 			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4939 			writeq(val64, &bar0->rx_pa_cfg);
4940 			sp->vlan_strip_flag = 0;
4941 		}
4942 
4943 		val64 = readq(&bar0->mac_cfg);
4944 		sp->promisc_flg = 1;
4945 		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4946 			  dev->name);
4947 	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4948 		/*  Remove the NIC from promiscuous mode */
4949 		add = &bar0->mac_cfg;
4950 		val64 = readq(&bar0->mac_cfg);
4951 		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4952 
4953 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4954 		writel((u32)val64, add);
4955 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4956 		writel((u32) (val64 >> 32), (add + 4));
4957 
4958 		if (vlan_tag_strip != 0) {
4959 			val64 = readq(&bar0->rx_pa_cfg);
4960 			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4961 			writeq(val64, &bar0->rx_pa_cfg);
4962 			sp->vlan_strip_flag = 1;
4963 		}
4964 
4965 		val64 = readq(&bar0->mac_cfg);
4966 		sp->promisc_flg = 0;
4967 		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
4968 	}
4969 
4970 	/*  Update individual M_CAST address list */
4971 	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
4972 		if (netdev_mc_count(dev) >
4973 		    (config->max_mc_addr - config->max_mac_addr)) {
4974 			DBG_PRINT(ERR_DBG,
4975 				  "%s: No more Rx filters can be added - "
4976 				  "please enable ALL_MULTI instead\n",
4977 				  dev->name);
4978 			return;
4979 		}
4980 
4981 		prev_cnt = sp->mc_addr_count;
4982 		sp->mc_addr_count = netdev_mc_count(dev);
4983 
4984 		/* Clear out the previous list of Mc in the H/W. */
4985 		for (i = 0; i < prev_cnt; i++) {
4986 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4987 			       &bar0->rmac_addr_data0_mem);
4988 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4989 			       &bar0->rmac_addr_data1_mem);
4990 			val64 = RMAC_ADDR_CMD_MEM_WE |
4991 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4992 				RMAC_ADDR_CMD_MEM_OFFSET
4993 				(config->mc_start_offset + i);
4994 			writeq(val64, &bar0->rmac_addr_cmd_mem);
4995 
4996 			/* Wait till command completes */
4997 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4998 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4999 						  S2IO_BIT_RESET)) {
5000 				DBG_PRINT(ERR_DBG,
5001 					  "%s: Adding Multicasts failed\n",
5002 					  dev->name);
5003 				return;
5004 			}
5005 		}
5006 
5007 		/* Create the new Rx filter list and update the same in H/W. */
5008 		i = 0;
5009 		netdev_for_each_mc_addr(ha, dev) {
5010 			mac_addr = 0;
5011 			for (j = 0; j < ETH_ALEN; j++) {
5012 				mac_addr |= ha->addr[j];
5013 				mac_addr <<= 8;
5014 			}
5015 			mac_addr >>= 8;
5016 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5017 			       &bar0->rmac_addr_data0_mem);
5018 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5019 			       &bar0->rmac_addr_data1_mem);
5020 			val64 = RMAC_ADDR_CMD_MEM_WE |
5021 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5022 				RMAC_ADDR_CMD_MEM_OFFSET
5023 				(i + config->mc_start_offset);
5024 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5025 
5026 			/* Wait till command completes */
5027 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5028 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5029 						  S2IO_BIT_RESET)) {
5030 				DBG_PRINT(ERR_DBG,
5031 					  "%s: Adding Multicasts failed\n",
5032 					  dev->name);
5033 				return;
5034 			}
5035 			i++;
5036 		}
5037 	}
5038 }
5039 
5040 /* read the unicast & multicast addresses from the CAM and store them
5041  * in the def_mac_addr structure
5042  */
5043 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5044 {
5045 	int offset;
5046 	u64 mac_addr = 0x0;
5047 	struct config_param *config = &sp->config;
5048 
5049 	/* store unicast & multicast mac addresses */
5050 	for (offset = 0; offset < config->max_mc_addr; offset++) {
5051 		mac_addr = do_s2io_read_unicast_mc(sp, offset);
5052 		/* if read fails disable the entry */
5053 		if (mac_addr == FAILURE)
5054 			mac_addr = S2IO_DISABLE_MAC_ENTRY;
5055 		do_s2io_copy_mac_addr(sp, offset, mac_addr);
5056 	}
5057 }
5058 
5059 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5060 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5061 {
5062 	int offset;
5063 	struct config_param *config = &sp->config;
5064 	/* restore unicast mac address */
5065 	for (offset = 0; offset < config->max_mac_addr; offset++)
5066 		do_s2io_prog_unicast(sp->dev,
5067 				     sp->def_mac_addr[offset].mac_addr);
5068 
5069 	/* restore multicast mac address */
5070 	for (offset = config->mc_start_offset;
5071 	     offset < config->max_mc_addr; offset++)
5072 		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5073 }
5074 
5075 /* add a multicast MAC address to CAM */
5076 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5077 {
5078 	int i;
5079 	u64 mac_addr = 0;
5080 	struct config_param *config = &sp->config;
5081 
5082 	for (i = 0; i < ETH_ALEN; i++) {
5083 		mac_addr <<= 8;
5084 		mac_addr |= addr[i];
5085 	}
5086 	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5087 		return SUCCESS;
5088 
5089 	/* check if the multicast mac is already present in the CAM */
5090 	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5091 		u64 tmp64;
5092 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5093 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5094 			break;
5095 
5096 		if (tmp64 == mac_addr)
5097 			return SUCCESS;
5098 	}
5099 	if (i == config->max_mc_addr) {
5100 		DBG_PRINT(ERR_DBG,
5101 			  "CAM full, no space left for multicast MAC\n");
5102 		return FAILURE;
5103 	}
5104 	/* Update the internal structure with this new mac address */
5105 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5106 
5107 	return do_s2io_add_mac(sp, mac_addr, i);
5108 }
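
/*
 * Example of the address packing above: the six bytes of
 * 00:11:22:33:44:55 fold MSB-first into the u64 0x0000001122334455,
 * the packed form consumed by do_s2io_add_mac() and
 * RMAC_ADDR_DATA0_MEM_ADDR().
 */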
5109 
5110 /* add MAC address to CAM */
5111 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5112 {
5113 	u64 val64;
5114 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5115 
5116 	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5117 	       &bar0->rmac_addr_data0_mem);
5118 
5119 	val64 =	RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5120 		RMAC_ADDR_CMD_MEM_OFFSET(off);
5121 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5122 
5123 	/* Wait till command completes */
5124 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5125 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5126 				  S2IO_BIT_RESET)) {
5127 		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5128 		return FAILURE;
5129 	}
5130 	return SUCCESS;
5131 }
5132 /* deletes a specified unicast/multicast mac entry from CAM */
5133 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5134 {
5135 	int offset;
5136 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5137 	struct config_param *config = &sp->config;
5138 
	for (offset = 1; offset < config->max_mc_addr; offset++) {
5141 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
5142 		if (tmp64 == addr) {
			/* disable the entry by writing 0xffffffffffffULL */
5144 			if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5145 				return FAILURE;
5146 			/* store the new mac list from CAM */
5147 			do_s2io_store_unicast_mc(sp);
5148 			return SUCCESS;
5149 		}
5150 	}
5151 	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5152 		  (unsigned long long)addr);
5153 	return FAILURE;
5154 }
5155 
5156 /* read mac entries from CAM */
5157 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5158 {
5159 	u64 tmp64 = 0xffffffffffff0000ULL, val64;
5160 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5161 
5162 	/* read mac addr */
5163 	val64 =	RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5164 		RMAC_ADDR_CMD_MEM_OFFSET(offset);
5165 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5166 
5167 	/* Wait till command completes */
5168 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5169 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5170 				  S2IO_BIT_RESET)) {
5171 		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5172 		return FAILURE;
5173 	}
5174 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
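	/* The 48-bit MAC address occupies bits 63:16 of the data register. */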
5175 
5176 	return tmp64 >> 16;
5177 }
5178 
5179 /**
5180  * s2io_set_mac_addr - driver entry point
5181  */
5182 
5183 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5184 {
5185 	struct sockaddr *addr = p;
5186 
5187 	if (!is_valid_ether_addr(addr->sa_data))
5188 		return -EADDRNOTAVAIL;
5189 
5190 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5191 
5192 	/* store the MAC address in CAM */
5193 	return do_s2io_prog_unicast(dev, dev->dev_addr);
5194 }
5195 /**
5196  *  do_s2io_prog_unicast - Programs the Xframe mac address
5197  *  @dev : pointer to the device structure.
 *  @addr: a uchar pointer to the new MAC address which is to be set.
 *  Description : This procedure programs the Xframe to receive
 *  frames with the new MAC address.
5201  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5202  *  as defined in errno.h file on failure.
5203  */
5204 
5205 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5206 {
5207 	struct s2io_nic *sp = netdev_priv(dev);
5208 	register u64 mac_addr = 0, perm_addr = 0;
5209 	int i;
5210 	u64 tmp64;
5211 	struct config_param *config = &sp->config;
5212 
5213 	/*
5214 	 * Set the new MAC address as the new unicast filter and reflect this
5215 	 * change on the device address registered with the OS. It will be
5216 	 * at offset 0.
5217 	 */
5218 	for (i = 0; i < ETH_ALEN; i++) {
5219 		mac_addr <<= 8;
5220 		mac_addr |= addr[i];
5221 		perm_addr <<= 8;
5222 		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5223 	}
5224 
	/* check if the dev_addr is different from perm_addr */
5226 	if (mac_addr == perm_addr)
5227 		return SUCCESS;
5228 
	/* check if the MAC is already present in the CAM */
5230 	for (i = 1; i < config->max_mac_addr; i++) {
5231 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5232 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5233 			break;
5234 
5235 		if (tmp64 == mac_addr) {
5236 			DBG_PRINT(INFO_DBG,
5237 				  "MAC addr:0x%llx already present in CAM\n",
5238 				  (unsigned long long)mac_addr);
5239 			return SUCCESS;
5240 		}
5241 	}
5242 	if (i == config->max_mac_addr) {
5243 		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5244 		return FAILURE;
5245 	}
5246 	/* Update the internal structure with this new mac address */
5247 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5248 
5249 	return do_s2io_add_mac(sp, mac_addr, i);
5250 }
5251 
5252 /**
5253  * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5254  * @sp : private member of the device structure, which is a pointer to the
5255  * s2io_nic structure.
5256  * @cmd: pointer to the structure with parameters given by ethtool to set
5257  * link information.
5258  * Description:
5259  * The function sets different link parameters provided by the user onto
5260  * the NIC.
5261  * Return value:
5262  * 0 on success.
5263  */
5264 
5265 static int
5266 s2io_ethtool_set_link_ksettings(struct net_device *dev,
5267 				const struct ethtool_link_ksettings *cmd)
5268 {
	struct s2io_nic *sp = netdev_priv(dev);

	if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
	    (cmd->base.speed != SPEED_10000) ||
	    (cmd->base.duplex != DUPLEX_FULL))
		return -EINVAL;

	s2io_close(sp->dev);
	s2io_open(sp->dev);

	return 0;
5280 }
5281 
5282 /**
 * s2io_ethtool_get_link_ksettings - Return link specific information.
5284  * @sp : private member of the device structure, pointer to the
5285  *      s2io_nic structure.
5286  * @cmd : pointer to the structure with parameters given by ethtool
5287  * to return link information.
5288  * Description:
 * Returns link-specific information like speed, duplex, etc. to ethtool.
5290  * Return value :
5291  * return 0 on success.
5292  */
5293 
5294 static int
5295 s2io_ethtool_get_link_ksettings(struct net_device *dev,
5296 				struct ethtool_link_ksettings *cmd)
5297 {
5298 	struct s2io_nic *sp = netdev_priv(dev);
5299 
5300 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
5301 	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5302 	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5303 
5304 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5305 	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5306 	ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5307 
5308 	cmd->base.port = PORT_FIBRE;
5309 
5310 	if (netif_carrier_ok(sp->dev)) {
5311 		cmd->base.speed = SPEED_10000;
5312 		cmd->base.duplex = DUPLEX_FULL;
5313 	} else {
5314 		cmd->base.speed = SPEED_UNKNOWN;
5315 		cmd->base.duplex = DUPLEX_UNKNOWN;
5316 	}
5317 
5318 	cmd->base.autoneg = AUTONEG_DISABLE;
5319 	return 0;
5320 }
5321 
5322 /**
5323  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5324  * @sp : private member of the device structure, which is a pointer to the
5325  * s2io_nic structure.
5326  * @info : pointer to the structure with parameters given by ethtool to
5327  * return driver information.
5328  * Description:
 * Returns driver-specific information like name, version, etc. to ethtool.
5330  * Return value:
5331  *  void
5332  */
5333 
5334 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5335 				  struct ethtool_drvinfo *info)
5336 {
5337 	struct s2io_nic *sp = netdev_priv(dev);
5338 
5339 	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5340 	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
5341 	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5342 }
5343 
5344 /**
 *  s2io_ethtool_gregs - dumps the entire register space of the Xframe
 *  into the buffer.
 *  @sp: private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  @regs : pointer to the structure with parameters given by ethtool for
 *  dumping the registers.
 *  @reg_space: The input argument into which all the registers are dumped.
 *  Description:
 *  Dumps the entire register space of the Xframe NIC into the user-given
 *  buffer area.
 * Return value :
 * void
5356  */
5357 
5358 static void s2io_ethtool_gregs(struct net_device *dev,
5359 			       struct ethtool_regs *regs, void *space)
5360 {
5361 	int i;
5362 	u64 reg;
5363 	u8 *reg_space = (u8 *)space;
5364 	struct s2io_nic *sp = netdev_priv(dev);
5365 
5366 	regs->len = XENA_REG_SPACE;
5367 	regs->version = sp->pdev->subsystem_device;
5368 
5369 	for (i = 0; i < regs->len; i += 8) {
5370 		reg = readq(sp->bar0 + i);
5371 		memcpy((reg_space + i), &reg, 8);
5372 	}
5373 }
5374 
5375 /*
5376  *  s2io_set_led - control NIC led
5377  */
5378 static void s2io_set_led(struct s2io_nic *sp, bool on)
5379 {
5380 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5381 	u16 subid = sp->pdev->subsystem_device;
5382 	u64 val64;
5383 
5384 	if ((sp->device_type == XFRAME_II_DEVICE) ||
5385 	    ((subid & 0xFF) >= 0x07)) {
5386 		val64 = readq(&bar0->gpio_control);
5387 		if (on)
5388 			val64 |= GPIO_CTRL_GPIO_0;
5389 		else
5390 			val64 &= ~GPIO_CTRL_GPIO_0;
5391 
5392 		writeq(val64, &bar0->gpio_control);
5393 	} else {
5394 		val64 = readq(&bar0->adapter_control);
5395 		if (on)
5396 			val64 |= ADAPTER_LED_ON;
5397 		else
5398 			val64 &= ~ADAPTER_LED_ON;
5399 
5400 		writeq(val64, &bar0->adapter_control);
5401 	}
5402 
5403 }
5404 
5405 /**
5406  * s2io_ethtool_set_led - To physically identify the nic on the system.
5407  * @dev : network device
5408  * @state: led setting
5409  *
5410  * Description: Used to physically identify the NIC on the system.
5411  * The Link LED will blink for a time specified by the user for
5412  * identification.
 * NOTE: The link has to be up for the LED to blink; hence
 * identification is possible only when the link is up.
5415  */
5416 
5417 static int s2io_ethtool_set_led(struct net_device *dev,
5418 				enum ethtool_phys_id_state state)
5419 {
5420 	struct s2io_nic *sp = netdev_priv(dev);
5421 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5422 	u16 subid = sp->pdev->subsystem_device;
5423 
5424 	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5425 		u64 val64 = readq(&bar0->adapter_control);
5426 		if (!(val64 & ADAPTER_CNTL_EN)) {
5427 			pr_err("Adapter Link down, cannot blink LED\n");
5428 			return -EAGAIN;
5429 		}
5430 	}
5431 
5432 	switch (state) {
5433 	case ETHTOOL_ID_ACTIVE:
5434 		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5435 		return 1;	/* cycle on/off once per second */
5436 
5437 	case ETHTOOL_ID_ON:
5438 		s2io_set_led(sp, true);
5439 		break;
5440 
5441 	case ETHTOOL_ID_OFF:
5442 		s2io_set_led(sp, false);
5443 		break;
5444 
5445 	case ETHTOOL_ID_INACTIVE:
5446 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5447 			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5448 	}
5449 
5450 	return 0;
5451 }
5452 
5453 static void s2io_ethtool_gringparam(struct net_device *dev,
5454 				    struct ethtool_ringparam *ering)
5455 {
5456 	struct s2io_nic *sp = netdev_priv(dev);
5457 	int i, tx_desc_count = 0, rx_desc_count = 0;
5458 
5459 	if (sp->rxd_mode == RXD_MODE_1) {
5460 		ering->rx_max_pending = MAX_RX_DESC_1;
5461 		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5462 	} else {
5463 		ering->rx_max_pending = MAX_RX_DESC_2;
5464 		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5465 	}
5466 
5467 	ering->tx_max_pending = MAX_TX_DESC;
5468 
5469 	for (i = 0; i < sp->config.rx_ring_num; i++)
5470 		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5471 	ering->rx_pending = rx_desc_count;
5472 	ering->rx_jumbo_pending = rx_desc_count;
5473 
5474 	for (i = 0; i < sp->config.tx_fifo_num; i++)
5475 		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5476 	ering->tx_pending = tx_desc_count;
5477 	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5478 }
5479 
5480 /**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5482  * @sp : private member of the device structure, which is a pointer to the
5483  *	s2io_nic structure.
5484  * @ep : pointer to the structure with pause parameters given by ethtool.
5485  * Description:
5486  * Returns the Pause frame generation and reception capability of the NIC.
5487  * Return value:
5488  *  void
5489  */
5490 static void s2io_ethtool_getpause_data(struct net_device *dev,
5491 				       struct ethtool_pauseparam *ep)
5492 {
5493 	u64 val64;
5494 	struct s2io_nic *sp = netdev_priv(dev);
5495 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5496 
5497 	val64 = readq(&bar0->rmac_pause_cfg);
5498 	if (val64 & RMAC_PAUSE_GEN_ENABLE)
5499 		ep->tx_pause = true;
5500 	if (val64 & RMAC_PAUSE_RX_ENABLE)
5501 		ep->rx_pause = true;
5502 	ep->autoneg = false;
5503 }
5504 
5505 /**
5506  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5507  * @sp : private member of the device structure, which is a pointer to the
5508  *      s2io_nic structure.
5509  * @ep : pointer to the structure with pause parameters given by ethtool.
5510  * Description:
5511  * It can be used to set or reset Pause frame generation or reception
5512  * support of the NIC.
5513  * Return value:
5514  * int, returns 0 on Success
5515  */
5516 
5517 static int s2io_ethtool_setpause_data(struct net_device *dev,
5518 				      struct ethtool_pauseparam *ep)
5519 {
5520 	u64 val64;
5521 	struct s2io_nic *sp = netdev_priv(dev);
5522 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5523 
5524 	val64 = readq(&bar0->rmac_pause_cfg);
5525 	if (ep->tx_pause)
5526 		val64 |= RMAC_PAUSE_GEN_ENABLE;
5527 	else
5528 		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5529 	if (ep->rx_pause)
5530 		val64 |= RMAC_PAUSE_RX_ENABLE;
5531 	else
5532 		val64 &= ~RMAC_PAUSE_RX_ENABLE;
5533 	writeq(val64, &bar0->rmac_pause_cfg);
5534 	return 0;
5535 }
5536 
5537 /**
5538  * read_eeprom - reads 4 bytes of data from user given offset.
5539  * @sp : private member of the device structure, which is a pointer to the
5540  *      s2io_nic structure.
 * @off : offset from which the data is to be read
 * @data : An output parameter where the data read from the given
 *	offset is stored.
 * Description:
 * Will read 4 bytes of data from the user-given offset and return the
 * read data.
 * NOTE: Only the part of the EEPROM visible through the I2C bus can
 *   be read.
5549  * Return value:
5550  *  -1 on failure and 0 on success.
5551  */
5552 
5553 #define S2IO_DEV_ID		5
5554 static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5555 {
5556 	int ret = -1;
5557 	u32 exit_cnt = 0;
5558 	u64 val64;
5559 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5560 
5561 	if (sp->device_type == XFRAME_I_DEVICE) {
5562 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5563 			I2C_CONTROL_ADDR(off) |
5564 			I2C_CONTROL_BYTE_CNT(0x3) |
5565 			I2C_CONTROL_READ |
5566 			I2C_CONTROL_CNTL_START;
5567 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5568 
5569 		while (exit_cnt < 5) {
5570 			val64 = readq(&bar0->i2c_control);
5571 			if (I2C_CONTROL_CNTL_END(val64)) {
5572 				*data = I2C_CONTROL_GET_DATA(val64);
5573 				ret = 0;
5574 				break;
5575 			}
5576 			msleep(50);
5577 			exit_cnt++;
5578 		}
5579 	}
5580 
5581 	if (sp->device_type == XFRAME_II_DEVICE) {
5582 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5583 			SPI_CONTROL_BYTECNT(0x3) |
5584 			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5585 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5586 		val64 |= SPI_CONTROL_REQ;
5587 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5588 		while (exit_cnt < 5) {
5589 			val64 = readq(&bar0->spi_control);
5590 			if (val64 & SPI_CONTROL_NACK) {
5591 				ret = 1;
5592 				break;
5593 			} else if (val64 & SPI_CONTROL_DONE) {
5594 				*data = readq(&bar0->spi_data);
5595 				*data &= 0xffffff;
5596 				ret = 0;
5597 				break;
5598 			}
5599 			msleep(50);
5600 			exit_cnt++;
5601 		}
5602 	}
5603 	return ret;
5604 }
5605 
5606 /**
5607  *  write_eeprom - actually writes the relevant part of the data value.
5608  *  @sp : private member of the device structure, which is a pointer to the
5609  *       s2io_nic structure.
5610  *  @off : offset at which the data must be written
5611  *  @data : The data that is to be written
5612  *  @cnt : Number of bytes of the data that are actually to be written into
5613  *  the Eeprom. (max of 3)
5614  * Description:
5615  *  Actually writes the relevant part of the data value into the Eeprom
5616  *  through the I2C bus.
5617  * Return value:
5618  *  0 on success, -1 on failure.
5619  */
5620 
5621 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5622 {
5623 	int exit_cnt = 0, ret = -1;
5624 	u64 val64;
5625 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5626 
5627 	if (sp->device_type == XFRAME_I_DEVICE) {
5628 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5629 			I2C_CONTROL_ADDR(off) |
5630 			I2C_CONTROL_BYTE_CNT(cnt) |
5631 			I2C_CONTROL_SET_DATA((u32)data) |
5632 			I2C_CONTROL_CNTL_START;
5633 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5634 
5635 		while (exit_cnt < 5) {
5636 			val64 = readq(&bar0->i2c_control);
5637 			if (I2C_CONTROL_CNTL_END(val64)) {
5638 				if (!(val64 & I2C_CONTROL_NACK))
5639 					ret = 0;
5640 				break;
5641 			}
5642 			msleep(50);
5643 			exit_cnt++;
5644 		}
5645 	}
5646 
5647 	if (sp->device_type == XFRAME_II_DEVICE) {
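		/* A byte count of 8 is encoded as 0 in the SPI byte-count
		 * field.
		 */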
5648 		int write_cnt = (cnt == 8) ? 0 : cnt;
5649 		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5650 
5651 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5652 			SPI_CONTROL_BYTECNT(write_cnt) |
5653 			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5654 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5655 		val64 |= SPI_CONTROL_REQ;
5656 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5657 		while (exit_cnt < 5) {
5658 			val64 = readq(&bar0->spi_control);
5659 			if (val64 & SPI_CONTROL_NACK) {
5660 				ret = 1;
5661 				break;
5662 			} else if (val64 & SPI_CONTROL_DONE) {
5663 				ret = 0;
5664 				break;
5665 			}
5666 			msleep(50);
5667 			exit_cnt++;
5668 		}
5669 	}
5670 	return ret;
5671 }
5672 static void s2io_vpd_read(struct s2io_nic *nic)
5673 {
5674 	u8 *vpd_data;
5675 	u8 data;
5676 	int i = 0, cnt, len, fail = 0;
5677 	int vpd_addr = 0x80;
5678 	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5679 
5680 	if (nic->device_type == XFRAME_II_DEVICE) {
5681 		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5682 		vpd_addr = 0x80;
5683 	} else {
5684 		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5685 		vpd_addr = 0x50;
5686 	}
5687 	strcpy(nic->serial_num, "NOT AVAILABLE");
5688 
5689 	vpd_data = kmalloc(256, GFP_KERNEL);
5690 	if (!vpd_data) {
5691 		swstats->mem_alloc_fail_cnt++;
5692 		return;
5693 	}
5694 	swstats->mem_allocated += 256;
5695 
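	/*
	 * Standard PCI VPD read protocol: write the VPD address with the
	 * flag byte cleared, then poll the flag byte until the hardware
	 * sets it to 0x80, signalling that a dword of data is ready.
	 */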
5696 	for (i = 0; i < 256; i += 4) {
5697 		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5698 		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5699 		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5700 		for (cnt = 0; cnt < 5; cnt++) {
5701 			msleep(2);
5702 			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5703 			if (data == 0x80)
5704 				break;
5705 		}
5706 		if (cnt >= 5) {
5707 			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5708 			fail = 1;
5709 			break;
5710 		}
5711 		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5712 				      (u32 *)&vpd_data[i]);
5713 	}
5714 
5715 	if (!fail) {
5716 		/* read serial number of adapter */
5717 		for (cnt = 0; cnt < 252; cnt++) {
5718 			if ((vpd_data[cnt] == 'S') &&
5719 			    (vpd_data[cnt+1] == 'N')) {
5720 				len = vpd_data[cnt+2];
5721 				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5722 					memcpy(nic->serial_num,
5723 					       &vpd_data[cnt + 3],
5724 					       len);
5725 					memset(nic->serial_num+len,
5726 					       0,
5727 					       VPD_STRING_LEN-len);
5728 					break;
5729 				}
5730 			}
5731 		}
5732 	}
5733 
5734 	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5735 		len = vpd_data[1];
5736 		memcpy(nic->product_name, &vpd_data[3], len);
5737 		nic->product_name[len] = 0;
5738 	}
5739 	kfree(vpd_data);
5740 	swstats->mem_freed += 256;
5741 }
5742 
5743 /**
5744  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5745  *  @sp : private member of the device structure, which is a pointer to the
5746  *  s2io_nic structure.
5747  *  @eeprom : pointer to the user level structure provided by ethtool,
5748  *  containing all relevant information.
5749  *  @data_buf : user defined value to be written into Eeprom.
5750  *  Description: Reads the values stored in the Eeprom at given offset
 *  for a given length. Stores these values in the input argument data
 *  buffer 'data_buf' and returns them to the caller (ethtool).
5753  *  Return value:
5754  *  int  0 on success
5755  */
5756 
5757 static int s2io_ethtool_geeprom(struct net_device *dev,
5758 				struct ethtool_eeprom *eeprom, u8 * data_buf)
5759 {
5760 	u32 i, valid;
5761 	u64 data;
5762 	struct s2io_nic *sp = netdev_priv(dev);
5763 
5764 	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5765 
5766 	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5767 		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5768 
5769 	for (i = 0; i < eeprom->len; i += 4) {
5770 		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5771 			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5772 			return -EFAULT;
5773 		}
5774 		valid = INV(data);
5775 		memcpy((data_buf + i), &valid, 4);
5776 	}
5777 	return 0;
5778 }
5779 
5780 /**
5781  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5782  *  @sp : private member of the device structure, which is a pointer to the
5783  *  s2io_nic structure.
5784  *  @eeprom : pointer to the user level structure provided by ethtool,
5785  *  containing all relevant information.
 *  @data_buf : user-defined value to be written into the Eeprom.
5787  *  Description:
5788  *  Tries to write the user provided value in the Eeprom, at the offset
5789  *  given by the user.
5790  *  Return value:
5791  *  0 on success, -EFAULT on failure.
5792  */
5793 
5794 static int s2io_ethtool_seeprom(struct net_device *dev,
5795 				struct ethtool_eeprom *eeprom,
5796 				u8 *data_buf)
5797 {
5798 	int len = eeprom->len, cnt = 0;
5799 	u64 valid = 0, data;
5800 	struct s2io_nic *sp = netdev_priv(dev);
5801 
5802 	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: "
			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
			  eeprom->magic,
			  (sp->pdev->vendor | (sp->pdev->device << 16)));
5808 		return -EFAULT;
5809 	}
5810 
5811 	while (len) {
5812 		data = (u32)data_buf[cnt] & 0x000000FF;
5813 		if (data)
5814 			valid = (u32)(data << 24);
5815 		else
5816 			valid = data;
5817 
5818 		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5819 			DBG_PRINT(ERR_DBG,
5820 				  "ETHTOOL_WRITE_EEPROM Err: "
5821 				  "Cannot write into the specified offset\n");
5822 			return -EFAULT;
5823 		}
5824 		cnt++;
5825 		len--;
5826 	}
5827 
5828 	return 0;
5829 }
5830 
5831 /**
5832  * s2io_register_test - reads and writes into all clock domains.
5833  * @sp : private member of the device structure, which is a pointer to the
5834  * s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted
 * by the driver.
5837  * Description:
 * Reads from and writes into all clock domains. The NIC has 3 clock
 * domains; the test verifies that registers in all three regions are
 * accessible.
5840  * Return value:
5841  * 0 on success.
5842  */
5843 
5844 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5845 {
5846 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5847 	u64 val64 = 0, exp_val;
5848 	int fail = 0;
5849 
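	/* The swapper feedback register returns a fixed pattern once the
	 * read byte-swapper is configured correctly.
	 */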
5850 	val64 = readq(&bar0->pif_rd_swapper_fb);
5851 	if (val64 != 0x123456789abcdefULL) {
5852 		fail = 1;
5853 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5854 	}
5855 
5856 	val64 = readq(&bar0->rmac_pause_cfg);
5857 	if (val64 != 0xc000ffff00000000ULL) {
5858 		fail = 1;
5859 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5860 	}
5861 
5862 	val64 = readq(&bar0->rx_queue_cfg);
5863 	if (sp->device_type == XFRAME_II_DEVICE)
5864 		exp_val = 0x0404040404040404ULL;
5865 	else
5866 		exp_val = 0x0808080808080808ULL;
5867 	if (val64 != exp_val) {
5868 		fail = 1;
5869 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5870 	}
5871 
5872 	val64 = readq(&bar0->xgxs_efifo_cfg);
5873 	if (val64 != 0x000000001923141EULL) {
5874 		fail = 1;
5875 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5876 	}
5877 
5878 	val64 = 0x5A5A5A5A5A5A5A5AULL;
5879 	writeq(val64, &bar0->xmsi_data);
5880 	val64 = readq(&bar0->xmsi_data);
5881 	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5882 		fail = 1;
5883 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5884 	}
5885 
5886 	val64 = 0xA5A5A5A5A5A5A5A5ULL;
5887 	writeq(val64, &bar0->xmsi_data);
5888 	val64 = readq(&bar0->xmsi_data);
5889 	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5890 		fail = 1;
5891 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5892 	}
5893 
5894 	*data = fail;
5895 	return fail;
5896 }
5897 
5898 /**
 * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
5900  * @sp : private member of the device structure, which is a pointer to the
5901  * s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted by
5903  * the driver.
5904  * Description:
 * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
5906  * register.
5907  * Return value:
5908  * 0 on success.
5909  */
5910 
5911 static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5912 {
5913 	int fail = 0;
5914 	u64 ret_data, org_4F0, org_7F0;
5915 	u8 saved_4F0 = 0, saved_7F0 = 0;
5916 	struct net_device *dev = sp->dev;
5917 
5918 	/* Test Write Error at offset 0 */
	/* Note that the SPI interface allows write access to all areas
	 * of the EEPROM, hence negative testing is done only for Xframe I.
	 */
5922 	if (sp->device_type == XFRAME_I_DEVICE)
5923 		if (!write_eeprom(sp, 0, 0, 3))
5924 			fail = 1;
5925 
5926 	/* Save current values at offsets 0x4F0 and 0x7F0 */
5927 	if (!read_eeprom(sp, 0x4F0, &org_4F0))
5928 		saved_4F0 = 1;
5929 	if (!read_eeprom(sp, 0x7F0, &org_7F0))
5930 		saved_7F0 = 1;
5931 
5932 	/* Test Write at offset 4f0 */
5933 	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5934 		fail = 1;
5935 	if (read_eeprom(sp, 0x4F0, &ret_data))
5936 		fail = 1;
5937 
5938 	if (ret_data != 0x012345) {
5939 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5940 			  "Data written %llx Data read %llx\n",
5941 			  dev->name, (unsigned long long)0x12345,
5942 			  (unsigned long long)ret_data);
5943 		fail = 1;
5944 	}
5945 
	/* Reset the EEPROM data to 0xFFFFFF */
5947 	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5948 
5949 	/* Test Write Request Error at offset 0x7c */
5950 	if (sp->device_type == XFRAME_I_DEVICE)
5951 		if (!write_eeprom(sp, 0x07C, 0, 3))
5952 			fail = 1;
5953 
5954 	/* Test Write Request at offset 0x7f0 */
5955 	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5956 		fail = 1;
5957 	if (read_eeprom(sp, 0x7F0, &ret_data))
5958 		fail = 1;
5959 
5960 	if (ret_data != 0x012345) {
5961 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5962 			  "Data written %llx Data read %llx\n",
5963 			  dev->name, (unsigned long long)0x12345,
5964 			  (unsigned long long)ret_data);
5965 		fail = 1;
5966 	}
5967 
	/* Reset the EEPROM data to 0xFFFFFF */
5969 	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5970 
5971 	if (sp->device_type == XFRAME_I_DEVICE) {
5972 		/* Test Write Error at offset 0x80 */
5973 		if (!write_eeprom(sp, 0x080, 0, 3))
5974 			fail = 1;
5975 
5976 		/* Test Write Error at offset 0xfc */
5977 		if (!write_eeprom(sp, 0x0FC, 0, 3))
5978 			fail = 1;
5979 
5980 		/* Test Write Error at offset 0x100 */
5981 		if (!write_eeprom(sp, 0x100, 0, 3))
5982 			fail = 1;
5983 
5984 		/* Test Write Error at offset 4ec */
5985 		if (!write_eeprom(sp, 0x4EC, 0, 3))
5986 			fail = 1;
5987 	}
5988 
5989 	/* Restore values at offsets 0x4F0 and 0x7F0 */
5990 	if (saved_4F0)
5991 		write_eeprom(sp, 0x4F0, org_4F0, 3);
5992 	if (saved_7F0)
5993 		write_eeprom(sp, 0x7F0, org_7F0, 3);
5994 
5995 	*data = fail;
5996 	return fail;
5997 }
5998 
5999 /**
 * s2io_bist_test - invokes the MemBist test of the card.
6001  * @sp : private member of the device structure, which is a pointer to the
6002  * s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted by
6004  * the driver.
6005  * Description:
 * This invokes the MemBist test of the card. We give around
 * 2 seconds for the test to complete. If it's still not complete
 * within this period, we consider that the test failed.
6009  * Return value:
6010  * 0 on success and -1 on failure.
6011  */
6012 
6013 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6014 {
6015 	u8 bist = 0;
6016 	int cnt = 0, ret = -1;
6017 
6018 	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6019 	bist |= PCI_BIST_START;
	pci_write_config_byte(sp->pdev, PCI_BIST, bist);
6021 
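	/* Poll for completion for up to 2 seconds (20 x 100 ms). */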
6022 	while (cnt < 20) {
6023 		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6024 		if (!(bist & PCI_BIST_START)) {
6025 			*data = (bist & PCI_BIST_CODE_MASK);
6026 			ret = 0;
6027 			break;
6028 		}
6029 		msleep(100);
6030 		cnt++;
6031 	}
6032 
6033 	return ret;
6034 }
6035 
6036 /**
6037  * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
6039  * s2io_nic structure.
6040  * @data: variable that returns the result of each of the test conducted by
6041  * the driver.
6042  * Description:
6043  * The function verifies the link state of the NIC and updates the input
6044  * argument 'data' appropriately.
6045  * Return value:
6046  * 0 on success.
6047  */
6048 
6049 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6050 {
6051 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6052 	u64 val64;
6053 
6054 	val64 = readq(&bar0->adapter_status);
6055 	if (!(LINK_IS_UP(val64)))
6056 		*data = 1;
6057 	else
6058 		*data = 0;
6059 
6060 	return *data;
6061 }
6062 
6063 /**
6064  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6065  * @sp: private member of the device structure, which is a pointer to the
6066  * s2io_nic structure.
6067  * @data: variable that returns the result of each of the test
6068  * conducted by the driver.
6069  * Description:
 *  This is one of the offline tests; it checks read and write
 *  access to the RLDRAM chip on the NIC.
6072  * Return value:
6073  *  0 on success.
6074  */
6075 
6076 static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6077 {
6078 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6079 	u64 val64;
6080 	int cnt, iteration = 0, test_fail = 0;
6081 
6082 	val64 = readq(&bar0->adapter_control);
6083 	val64 &= ~ADAPTER_ECC_EN;
6084 	writeq(val64, &bar0->adapter_control);
6085 
6086 	val64 = readq(&bar0->mc_rldram_test_ctrl);
6087 	val64 |= MC_RLDRAM_TEST_MODE;
6088 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6089 
6090 	val64 = readq(&bar0->mc_rldram_mrs);
6091 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6092 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6093 
6094 	val64 |= MC_RLDRAM_MRS_ENABLE;
6095 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6096 
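	/*
	 * Two passes over the same RLDRAM location: pass 0 writes the
	 * test patterns as-is, pass 1 writes them with the upper 48 bits
	 * inverted, exercising both polarities of each cell.
	 */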
6097 	while (iteration < 2) {
6098 		val64 = 0x55555555aaaa0000ULL;
6099 		if (iteration == 1)
6100 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6101 		writeq(val64, &bar0->mc_rldram_test_d0);
6102 
6103 		val64 = 0xaaaa5a5555550000ULL;
6104 		if (iteration == 1)
6105 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6106 		writeq(val64, &bar0->mc_rldram_test_d1);
6107 
6108 		val64 = 0x55aaaaaaaa5a0000ULL;
6109 		if (iteration == 1)
6110 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6111 		writeq(val64, &bar0->mc_rldram_test_d2);
6112 
6113 		val64 = (u64) (0x0000003ffffe0100ULL);
6114 		writeq(val64, &bar0->mc_rldram_test_add);
6115 
6116 		val64 = MC_RLDRAM_TEST_MODE |
6117 			MC_RLDRAM_TEST_WRITE |
6118 			MC_RLDRAM_TEST_GO;
6119 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6120 
6121 		for (cnt = 0; cnt < 5; cnt++) {
6122 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6123 			if (val64 & MC_RLDRAM_TEST_DONE)
6124 				break;
6125 			msleep(200);
6126 		}
6127 
6128 		if (cnt == 5)
6129 			break;
6130 
6131 		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6132 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6133 
6134 		for (cnt = 0; cnt < 5; cnt++) {
6135 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6136 			if (val64 & MC_RLDRAM_TEST_DONE)
6137 				break;
6138 			msleep(500);
6139 		}
6140 
6141 		if (cnt == 5)
6142 			break;
6143 
6144 		val64 = readq(&bar0->mc_rldram_test_ctrl);
6145 		if (!(val64 & MC_RLDRAM_TEST_PASS))
6146 			test_fail = 1;
6147 
6148 		iteration++;
6149 	}
6150 
6151 	*data = test_fail;
6152 
6153 	/* Bring the adapter out of test mode */
6154 	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6155 
6156 	return test_fail;
6157 }
6158 
6159 /**
 *  s2io_ethtool_test - conducts 5 tests to determine the health of the card.
6161  *  @sp : private member of the device structure, which is a pointer to the
6162  *  s2io_nic structure.
6163  *  @ethtest : pointer to a ethtool command specific structure that will be
6164  *  returned to the user.
6165  *  @data : variable that returns the result of each of the test
6166  * conducted by the driver.
6167  * Description:
 *  This function conducts 5 tests (4 offline and 1 online) to determine
6169  *  the health of the card.
6170  * Return value:
6171  *  void
6172  */
6173 
6174 static void s2io_ethtool_test(struct net_device *dev,
6175 			      struct ethtool_test *ethtest,
6176 			      uint64_t *data)
6177 {
6178 	struct s2io_nic *sp = netdev_priv(dev);
6179 	int orig_state = netif_running(sp->dev);
6180 
6181 	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6182 		/* Offline Tests. */
6183 		if (orig_state)
6184 			s2io_close(sp->dev);
6185 
6186 		if (s2io_register_test(sp, &data[0]))
6187 			ethtest->flags |= ETH_TEST_FL_FAILED;
6188 
6189 		s2io_reset(sp);
6190 
6191 		if (s2io_rldram_test(sp, &data[3]))
6192 			ethtest->flags |= ETH_TEST_FL_FAILED;
6193 
6194 		s2io_reset(sp);
6195 
6196 		if (s2io_eeprom_test(sp, &data[1]))
6197 			ethtest->flags |= ETH_TEST_FL_FAILED;
6198 
6199 		if (s2io_bist_test(sp, &data[4]))
6200 			ethtest->flags |= ETH_TEST_FL_FAILED;
6201 
6202 		if (orig_state)
6203 			s2io_open(sp->dev);
6204 
6205 		data[2] = 0;
6206 	} else {
6207 		/* Online Tests. */
6208 		if (!orig_state) {
6209 			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6210 				  dev->name);
6211 			data[0] = -1;
6212 			data[1] = -1;
6213 			data[2] = -1;
6214 			data[3] = -1;
6215 			data[4] = -1;
6216 		}
6217 
6218 		if (s2io_link_test(sp, &data[2]))
6219 			ethtest->flags |= ETH_TEST_FL_FAILED;
6220 
6221 		data[0] = 0;
6222 		data[1] = 0;
6223 		data[3] = 0;
6224 		data[4] = 0;
6225 	}
6226 }
6227 
6228 static void s2io_get_ethtool_stats(struct net_device *dev,
6229 				   struct ethtool_stats *estats,
6230 				   u64 *tmp_stats)
6231 {
6232 	int i = 0, k;
6233 	struct s2io_nic *sp = netdev_priv(dev);
6234 	struct stat_block *stats = sp->mac_control.stats_info;
6235 	struct swStat *swstats = &stats->sw_stat;
6236 	struct xpakStat *xstats = &stats->xpak_stat;
6237 
6238 	s2io_updt_stats(sp);
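	/*
	 * Many MAC counters are kept by hardware as a 32-bit value plus a
	 * 32-bit overflow counter; fold each pair into one 64-bit stat.
	 */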
6239 	tmp_stats[i++] =
6240 		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
6241 		le32_to_cpu(stats->tmac_frms);
6242 	tmp_stats[i++] =
6243 		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6244 		le32_to_cpu(stats->tmac_data_octets);
6245 	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6246 	tmp_stats[i++] =
6247 		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6248 		le32_to_cpu(stats->tmac_mcst_frms);
6249 	tmp_stats[i++] =
6250 		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6251 		le32_to_cpu(stats->tmac_bcst_frms);
6252 	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6253 	tmp_stats[i++] =
6254 		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6255 		le32_to_cpu(stats->tmac_ttl_octets);
6256 	tmp_stats[i++] =
6257 		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6258 		le32_to_cpu(stats->tmac_ucst_frms);
6259 	tmp_stats[i++] =
6260 		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6261 		le32_to_cpu(stats->tmac_nucst_frms);
6262 	tmp_stats[i++] =
6263 		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6264 		le32_to_cpu(stats->tmac_any_err_frms);
6265 	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6266 	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6267 	tmp_stats[i++] =
6268 		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6269 		le32_to_cpu(stats->tmac_vld_ip);
6270 	tmp_stats[i++] =
6271 		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6272 		le32_to_cpu(stats->tmac_drop_ip);
6273 	tmp_stats[i++] =
6274 		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6275 		le32_to_cpu(stats->tmac_icmp);
6276 	tmp_stats[i++] =
6277 		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6278 		le32_to_cpu(stats->tmac_rst_tcp);
6279 	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6280 	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6281 		le32_to_cpu(stats->tmac_udp);
6282 	tmp_stats[i++] =
6283 		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6284 		le32_to_cpu(stats->rmac_vld_frms);
6285 	tmp_stats[i++] =
6286 		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6287 		le32_to_cpu(stats->rmac_data_octets);
6288 	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6289 	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6290 	tmp_stats[i++] =
6291 		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6292 		le32_to_cpu(stats->rmac_vld_mcst_frms);
6293 	tmp_stats[i++] =
6294 		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6295 		le32_to_cpu(stats->rmac_vld_bcst_frms);
6296 	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6297 	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6298 	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6299 	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6300 	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6301 	tmp_stats[i++] =
6302 		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6303 		le32_to_cpu(stats->rmac_ttl_octets);
6304 	tmp_stats[i++] =
6305 		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6306 		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
6307 	tmp_stats[i++] =
6308 		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6309 		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6310 	tmp_stats[i++] =
6311 		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6312 		le32_to_cpu(stats->rmac_discarded_frms);
6313 	tmp_stats[i++] =
6314 		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6315 		<< 32 | le32_to_cpu(stats->rmac_drop_events);
6316 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6317 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6318 	tmp_stats[i++] =
6319 		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6320 		le32_to_cpu(stats->rmac_usized_frms);
6321 	tmp_stats[i++] =
6322 		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6323 		le32_to_cpu(stats->rmac_osized_frms);
6324 	tmp_stats[i++] =
6325 		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6326 		le32_to_cpu(stats->rmac_frag_frms);
6327 	tmp_stats[i++] =
6328 		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6329 		le32_to_cpu(stats->rmac_jabber_frms);
6330 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6331 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6332 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6333 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6334 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6335 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6336 	tmp_stats[i++] =
6337 		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6338 		le32_to_cpu(stats->rmac_ip);
6339 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6340 	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6341 	tmp_stats[i++] =
6342 		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6343 		le32_to_cpu(stats->rmac_drop_ip);
6344 	tmp_stats[i++] =
6345 		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6346 		le32_to_cpu(stats->rmac_icmp);
6347 	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6348 	tmp_stats[i++] =
6349 		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6350 		le32_to_cpu(stats->rmac_udp);
6351 	tmp_stats[i++] =
6352 		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6353 		le32_to_cpu(stats->rmac_err_drp_udp);
6354 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6355 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6356 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6357 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6358 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6359 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6360 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6361 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6362 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6363 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6364 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6365 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6366 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6367 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6368 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6369 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6370 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6371 	tmp_stats[i++] =
6372 		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6373 		le32_to_cpu(stats->rmac_pause_cnt);
6374 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6375 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6376 	tmp_stats[i++] =
6377 		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6378 		le32_to_cpu(stats->rmac_accepted_ip);
6379 	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6380 	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6381 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6382 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6383 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6384 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6385 	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6386 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6387 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6388 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6389 	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6390 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6391 	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6392 	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6393 	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6394 	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6395 	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6396 	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6397 	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6398 
6399 	/* Enhanced statistics exist only for Hercules */
6400 	if (sp->device_type == XFRAME_II_DEVICE) {
6401 		tmp_stats[i++] =
6402 			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6403 		tmp_stats[i++] =
6404 			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6405 		tmp_stats[i++] =
6406 			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6407 		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6408 		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6409 		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6410 		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6411 		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6412 		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6413 		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6414 		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6415 		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6416 		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6417 		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6418 		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6419 		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6420 	}
6421 
6422 	tmp_stats[i++] = 0;
6423 	tmp_stats[i++] = swstats->single_ecc_errs;
6424 	tmp_stats[i++] = swstats->double_ecc_errs;
6425 	tmp_stats[i++] = swstats->parity_err_cnt;
6426 	tmp_stats[i++] = swstats->serious_err_cnt;
6427 	tmp_stats[i++] = swstats->soft_reset_cnt;
6428 	tmp_stats[i++] = swstats->fifo_full_cnt;
6429 	for (k = 0; k < MAX_RX_RINGS; k++)
6430 		tmp_stats[i++] = swstats->ring_full_cnt[k];
6431 	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6432 	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6433 	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6434 	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6435 	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6436 	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6437 	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6438 	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6439 	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6440 	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6441 	tmp_stats[i++] = xstats->warn_laser_output_power_high;
6442 	tmp_stats[i++] = xstats->warn_laser_output_power_low;
6443 	tmp_stats[i++] = swstats->clubbed_frms_cnt;
6444 	tmp_stats[i++] = swstats->sending_both;
6445 	tmp_stats[i++] = swstats->outof_sequence_pkts;
6446 	tmp_stats[i++] = swstats->flush_max_pkts;
6447 	if (swstats->num_aggregations) {
6448 		u64 tmp = swstats->sum_avg_pkts_aggregated;
6449 		int count = 0;
6450 		/*
6451 		 * Since 64-bit divide does not work on all platforms,
6452 		 * do repeated subtraction.
6453 		 */
6454 		while (tmp >= swstats->num_aggregations) {
6455 			tmp -= swstats->num_aggregations;
6456 			count++;
6457 		}
6458 		tmp_stats[i++] = count;
6459 	} else
6460 		tmp_stats[i++] = 0;
6461 	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6462 	tmp_stats[i++] = swstats->pci_map_fail_cnt;
6463 	tmp_stats[i++] = swstats->watchdog_timer_cnt;
6464 	tmp_stats[i++] = swstats->mem_allocated;
6465 	tmp_stats[i++] = swstats->mem_freed;
6466 	tmp_stats[i++] = swstats->link_up_cnt;
6467 	tmp_stats[i++] = swstats->link_down_cnt;
6468 	tmp_stats[i++] = swstats->link_up_time;
6469 	tmp_stats[i++] = swstats->link_down_time;
6470 
6471 	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6472 	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6473 	tmp_stats[i++] = swstats->tx_parity_err_cnt;
6474 	tmp_stats[i++] = swstats->tx_link_loss_cnt;
6475 	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6476 
6477 	tmp_stats[i++] = swstats->rx_parity_err_cnt;
6478 	tmp_stats[i++] = swstats->rx_abort_cnt;
6479 	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6480 	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6481 	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6482 	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6483 	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6484 	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6485 	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6486 	tmp_stats[i++] = swstats->tda_err_cnt;
6487 	tmp_stats[i++] = swstats->pfc_err_cnt;
6488 	tmp_stats[i++] = swstats->pcc_err_cnt;
6489 	tmp_stats[i++] = swstats->tti_err_cnt;
6490 	tmp_stats[i++] = swstats->tpa_err_cnt;
6491 	tmp_stats[i++] = swstats->sm_err_cnt;
6492 	tmp_stats[i++] = swstats->lso_err_cnt;
6493 	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6494 	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6495 	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6496 	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6497 	tmp_stats[i++] = swstats->rc_err_cnt;
6498 	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6499 	tmp_stats[i++] = swstats->rpa_err_cnt;
6500 	tmp_stats[i++] = swstats->rda_err_cnt;
6501 	tmp_stats[i++] = swstats->rti_err_cnt;
6502 	tmp_stats[i++] = swstats->mc_err_cnt;
6503 }
6504 
6505 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6506 {
6507 	return XENA_REG_SPACE;
6508 }
6509 
6510 
6511 static int s2io_get_eeprom_len(struct net_device *dev)
6512 {
6513 	return XENA_EEPROM_SPACE;
6514 }
6515 
6516 static int s2io_get_sset_count(struct net_device *dev, int sset)
6517 {
6518 	struct s2io_nic *sp = netdev_priv(dev);
6519 
6520 	switch (sset) {
6521 	case ETH_SS_TEST:
6522 		return S2IO_TEST_LEN;
6523 	case ETH_SS_STATS:
6524 		switch (sp->device_type) {
6525 		case XFRAME_I_DEVICE:
6526 			return XFRAME_I_STAT_LEN;
6527 		case XFRAME_II_DEVICE:
6528 			return XFRAME_II_STAT_LEN;
6529 		default:
6530 			return 0;
6531 		}
6532 	default:
6533 		return -EOPNOTSUPP;
6534 	}
6535 }
6536 
6537 static void s2io_ethtool_get_strings(struct net_device *dev,
6538 				     u32 stringset, u8 *data)
6539 {
6540 	int stat_size = 0;
6541 	struct s2io_nic *sp = netdev_priv(dev);
6542 
6543 	switch (stringset) {
6544 	case ETH_SS_TEST:
6545 		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6546 		break;
6547 	case ETH_SS_STATS:
6548 		stat_size = sizeof(ethtool_xena_stats_keys);
6549 		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6550 		if (sp->device_type == XFRAME_II_DEVICE) {
6551 			memcpy(data + stat_size,
6552 			       &ethtool_enhanced_stats_keys,
6553 			       sizeof(ethtool_enhanced_stats_keys));
6554 			stat_size += sizeof(ethtool_enhanced_stats_keys);
6555 		}
6556 
6557 		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6558 		       sizeof(ethtool_driver_stats_keys));
6559 	}
6560 }
6561 
6562 static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6563 {
6564 	struct s2io_nic *sp = netdev_priv(dev);
6565 	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6566 
6567 	if (changed && netif_running(dev)) {
6568 		int rc;
6569 
6570 		s2io_stop_all_tx_queue(sp);
6571 		s2io_card_down(sp);
6572 		dev->features = features;
6573 		rc = s2io_card_up(sp);
6574 		if (rc)
6575 			s2io_reset(sp);
6576 		else
6577 			s2io_start_all_tx_queue(sp);
6578 
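		/*
		 * A positive return value tells the networking core that
		 * the driver has already updated dev->features itself.
		 */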
6579 		return rc ? rc : 1;
6580 	}
6581 
6582 	return 0;
6583 }
6584 
6585 static const struct ethtool_ops netdev_ethtool_ops = {
6586 	.get_drvinfo = s2io_ethtool_gdrvinfo,
6587 	.get_regs_len = s2io_ethtool_get_regs_len,
6588 	.get_regs = s2io_ethtool_gregs,
6589 	.get_link = ethtool_op_get_link,
6590 	.get_eeprom_len = s2io_get_eeprom_len,
6591 	.get_eeprom = s2io_ethtool_geeprom,
6592 	.set_eeprom = s2io_ethtool_seeprom,
6593 	.get_ringparam = s2io_ethtool_gringparam,
6594 	.get_pauseparam = s2io_ethtool_getpause_data,
6595 	.set_pauseparam = s2io_ethtool_setpause_data,
6596 	.self_test = s2io_ethtool_test,
6597 	.get_strings = s2io_ethtool_get_strings,
6598 	.set_phys_id = s2io_ethtool_set_led,
6599 	.get_ethtool_stats = s2io_get_ethtool_stats,
6600 	.get_sset_count = s2io_get_sset_count,
6601 	.get_link_ksettings = s2io_ethtool_get_link_ksettings,
6602 	.set_link_ksettings = s2io_ethtool_set_link_ksettings,
6603 };
6604 
6605 /**
6606  *  s2io_ioctl - Entry point for the Ioctl
6607  *  @dev :  Device pointer.
 *  @ifr :  An IOCTL-specific structure that can contain a pointer to
6609  *  a proprietary structure used to pass information to the driver.
6610  *  @cmd :  This is used to distinguish between the different commands that
6611  *  can be passed to the IOCTL functions.
6612  *  Description:
 *  Currently no special functionality is supported in IOCTL, hence the
 *  function always returns -EOPNOTSUPP.
6615  */
6616 
6617 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6618 {
6619 	return -EOPNOTSUPP;
6620 }
6621 
6622 /**
6623  *  s2io_change_mtu - entry point to change MTU size for the device.
6624  *   @dev : device pointer.
6625  *   @new_mtu : the new MTU size for the device.
 *   Description: A driver entry point to change MTU size for the device.
 *   If the device is running, it is brought down and back up to apply
 *   the new MTU.
6628  *  Return value:
6629  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6630  *   file on failure.
6631  */
6632 
6633 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6634 {
6635 	struct s2io_nic *sp = netdev_priv(dev);
6636 	int ret = 0;
6637 
6638 	dev->mtu = new_mtu;
6639 	if (netif_running(dev)) {
6640 		s2io_stop_all_tx_queue(sp);
6641 		s2io_card_down(sp);
6642 		ret = s2io_card_up(sp);
6643 		if (ret) {
6644 			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6645 				  __func__);
6646 			return ret;
6647 		}
6648 		s2io_wake_all_tx_queue(sp);
6649 	} else { /* Device is down */
6650 		struct XENA_dev_config __iomem *bar0 = sp->bar0;
6651 		u64 val64 = new_mtu;
6652 
6653 		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6654 	}
6655 
6656 	return ret;
6657 }
6658 
6659 /**
 * s2io_set_link - Set the link status
 * @work: work queue entry containing the s2io_nic private structure
 * Description: Sets the link status for the adapter
6663  */
6664 
6665 static void s2io_set_link(struct work_struct *work)
6666 {
6667 	struct s2io_nic *nic = container_of(work, struct s2io_nic,
6668 					    set_link_task);
6669 	struct net_device *dev = nic->dev;
6670 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
6671 	register u64 val64;
6672 	u16 subid;
6673 
6674 	rtnl_lock();
6675 
6676 	if (!netif_running(dev))
6677 		goto out_unlock;
6678 
6679 	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6680 		/* The card is being reset, no point doing anything */
6681 		goto out_unlock;
6682 	}
6683 
6684 	subid = nic->pdev->subsystem_device;
6685 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6686 		/*
		 * Allow a small delay for the NIC's self-initiated
6688 		 * cleanup to complete.
6689 		 */
6690 		msleep(100);
6691 	}
6692 
6693 	val64 = readq(&bar0->adapter_status);
6694 	if (LINK_IS_UP(val64)) {
6695 		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6696 			if (verify_xena_quiescence(nic)) {
6697 				val64 = readq(&bar0->adapter_control);
6698 				val64 |= ADAPTER_CNTL_EN;
6699 				writeq(val64, &bar0->adapter_control);
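				/*
				 * Cards with faulty link indicators drive
				 * the link LED through GPIO 0 instead of
				 * the LED bit in adapter_control.
				 */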
6700 				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6701 					    nic->device_type, subid)) {
6702 					val64 = readq(&bar0->gpio_control);
6703 					val64 |= GPIO_CTRL_GPIO_0;
6704 					writeq(val64, &bar0->gpio_control);
6705 					val64 = readq(&bar0->gpio_control);
6706 				} else {
6707 					val64 |= ADAPTER_LED_ON;
6708 					writeq(val64, &bar0->adapter_control);
6709 				}
6710 				nic->device_enabled_once = true;
6711 			} else {
6712 				DBG_PRINT(ERR_DBG,
6713 					  "%s: Error: device is not Quiescent\n",
6714 					  dev->name);
6715 				s2io_stop_all_tx_queue(nic);
6716 			}
6717 		}
6718 		val64 = readq(&bar0->adapter_control);
6719 		val64 |= ADAPTER_LED_ON;
6720 		writeq(val64, &bar0->adapter_control);
6721 		s2io_link(nic, LINK_UP);
6722 	} else {
6723 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6724 						      subid)) {
6725 			val64 = readq(&bar0->gpio_control);
6726 			val64 &= ~GPIO_CTRL_GPIO_0;
6727 			writeq(val64, &bar0->gpio_control);
6728 			val64 = readq(&bar0->gpio_control);
6729 		}
6730 		/* turn off LED */
6731 		val64 = readq(&bar0->adapter_control);
6732 		val64 = val64 & (~ADAPTER_LED_ON);
6733 		writeq(val64, &bar0->adapter_control);
6734 		s2io_link(nic, LINK_DOWN);
6735 	}
6736 	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6737 
6738 out_unlock:
6739 	rtnl_unlock();
6740 }
6741 
6742 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6743 				  struct buffAdd *ba,
6744 				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
6745 				  u64 *temp2, int size)
6746 {
6747 	struct net_device *dev = sp->dev;
6748 	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6749 
6750 	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6751 		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6752 		/* allocate skb */
6753 		if (*skb) {
6754 			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6755 			/*
			 * As Rx frames are not going to be processed,
			 * reuse the same mapped address for the RxD
			 * buffer pointer
6759 			 */
6760 			rxdp1->Buffer0_ptr = *temp0;
6761 		} else {
6762 			*skb = netdev_alloc_skb(dev, size);
6763 			if (!(*skb)) {
6764 				DBG_PRINT(INFO_DBG,
6765 					  "%s: Out of memory to allocate %s\n",
6766 					  dev->name, "1 buf mode SKBs");
6767 				stats->mem_alloc_fail_cnt++;
				return -ENOMEM;
6769 			}
6770 			stats->mem_allocated += (*skb)->truesize;
			/* Store the mapped address in a temp variable
			 * so it can be reused for the next RxD whose
			 * Host_Control is NULL.
			 */
6775 			rxdp1->Buffer0_ptr = *temp0 =
6776 				pci_map_single(sp->pdev, (*skb)->data,
6777 					       size - NET_IP_ALIGN,
6778 					       PCI_DMA_FROMDEVICE);
6779 			if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6780 				goto memalloc_failed;
6781 			rxdp->Host_Control = (unsigned long) (*skb);
6782 		}
6783 	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6784 		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6785 		/* Two buffer Mode */
6786 		if (*skb) {
6787 			rxdp3->Buffer2_ptr = *temp2;
6788 			rxdp3->Buffer0_ptr = *temp0;
6789 			rxdp3->Buffer1_ptr = *temp1;
6790 		} else {
6791 			*skb = netdev_alloc_skb(dev, size);
6792 			if (!(*skb)) {
6793 				DBG_PRINT(INFO_DBG,
6794 					  "%s: Out of memory to allocate %s\n",
6795 					  dev->name,
6796 					  "2 buf mode SKBs");
6797 				stats->mem_alloc_fail_cnt++;
6798 				return -ENOMEM;
6799 			}
6800 			stats->mem_allocated += (*skb)->truesize;
6801 			rxdp3->Buffer2_ptr = *temp2 =
6802 				pci_map_single(sp->pdev, (*skb)->data,
6803 					       dev->mtu + 4,
6804 					       PCI_DMA_FROMDEVICE);
6805 			if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6806 				goto memalloc_failed;
6807 			rxdp3->Buffer0_ptr = *temp0 =
6808 				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6809 					       PCI_DMA_FROMDEVICE);
6810 			if (pci_dma_mapping_error(sp->pdev,
6811 						  rxdp3->Buffer0_ptr)) {
6812 				pci_unmap_single(sp->pdev,
6813 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6814 						 dev->mtu + 4,
6815 						 PCI_DMA_FROMDEVICE);
6816 				goto memalloc_failed;
6817 			}
6818 			rxdp->Host_Control = (unsigned long) (*skb);
6819 
			/* Buffer-1 will be a dummy buffer, not used */
6821 			rxdp3->Buffer1_ptr = *temp1 =
6822 				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6823 					       PCI_DMA_FROMDEVICE);
6824 			if (pci_dma_mapping_error(sp->pdev,
6825 						  rxdp3->Buffer1_ptr)) {
6826 				pci_unmap_single(sp->pdev,
6827 						 (dma_addr_t)rxdp3->Buffer0_ptr,
6828 						 BUF0_LEN, PCI_DMA_FROMDEVICE);
6829 				pci_unmap_single(sp->pdev,
6830 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6831 						 dev->mtu + 4,
6832 						 PCI_DMA_FROMDEVICE);
6833 				goto memalloc_failed;
6834 			}
6835 		}
6836 	}
6837 	return 0;
6838 
6839 memalloc_failed:
6840 	stats->pci_map_fail_cnt++;
6841 	stats->mem_freed += (*skb)->truesize;
6842 	dev_kfree_skb(*skb);
6843 	return -ENOMEM;
6844 }
6845 
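/*
 * Program the per-buffer sizes in the RxD's Control_2 word according to
 * the ring mode: a single buffer for RXD_MODE_1, or header, dummy and
 * payload buffers for RXD_MODE_3B.
 */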
6846 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6847 				int size)
6848 {
6849 	struct net_device *dev = sp->dev;
6850 	if (sp->rxd_mode == RXD_MODE_1) {
6851 		rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6852 	} else if (sp->rxd_mode == RXD_MODE_3B) {
6853 		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6854 		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6855 		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6856 	}
6857 }
6858 
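/*
 * Walk every RxD of every configured ring, re-attach receive buffers and
 * hand ownership back to the hardware. Called on the card-down path so
 * the NIC can keep replenishing and a ring bump is avoided.
 */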
static int rxd_owner_bit_reset(struct s2io_nic *sp)
6860 {
6861 	int i, j, k, blk_cnt = 0, size;
6862 	struct config_param *config = &sp->config;
6863 	struct mac_info *mac_control = &sp->mac_control;
6864 	struct net_device *dev = sp->dev;
6865 	struct RxD_t *rxdp = NULL;
6866 	struct sk_buff *skb = NULL;
6867 	struct buffAdd *ba = NULL;
6868 	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6869 
6870 	/* Calculate the size based on ring mode */
6871 	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6872 		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6873 	if (sp->rxd_mode == RXD_MODE_1)
6874 		size += NET_IP_ALIGN;
6875 	else if (sp->rxd_mode == RXD_MODE_3B)
6876 		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6877 
6878 	for (i = 0; i < config->rx_ring_num; i++) {
6879 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6880 		struct ring_info *ring = &mac_control->rings[i];
6881 
6882 		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6883 
6884 		for (j = 0; j < blk_cnt; j++) {
6885 			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6886 				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6887 				if (sp->rxd_mode == RXD_MODE_3B)
6888 					ba = &ring->ba[j][k];
6889 				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6890 							   &temp0_64,
6891 							   &temp1_64,
6892 							   &temp2_64,
6893 							   size) == -ENOMEM) {
6894 					return 0;
6895 				}
6896 
6897 				set_rxd_buffer_size(sp, rxdp, size);
6898 				dma_wmb();
6899 				/* flip the Ownership bit to Hardware */
6900 				rxdp->Control_1 |= RXD_OWN_XENA;
6901 			}
6902 		}
6903 	}
	return 0;
}
6907 
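/*
 * Register the driver's interrupt handlers. With MSI-X, one vector is
 * requested per Rx ring plus one alarm vector; on any registration
 * failure the driver falls back to a single shared INTA handler.
 */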
6908 static int s2io_add_isr(struct s2io_nic *sp)
6909 {
6910 	int ret = 0;
6911 	struct net_device *dev = sp->dev;
6912 	int err = 0;
6913 
6914 	if (sp->config.intr_type == MSI_X)
6915 		ret = s2io_enable_msi_x(sp);
6916 	if (ret) {
6917 		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6918 		sp->config.intr_type = INTA;
6919 	}
6920 
6921 	/*
	 * Store the values of the MSI-X table in
	 * the s2io_nic structure
6924 	 */
6925 	store_xmsi_data(sp);
6926 
6927 	/* After proper initialization of H/W, register ISR */
6928 	if (sp->config.intr_type == MSI_X) {
6929 		int i, msix_rx_cnt = 0;
6930 
6931 		for (i = 0; i < sp->num_entries; i++) {
6932 			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6933 				if (sp->s2io_entries[i].type ==
6934 				    MSIX_RING_TYPE) {
6935 					snprintf(sp->desc[i],
6936 						sizeof(sp->desc[i]),
6937 						"%s:MSI-X-%d-RX",
6938 						dev->name, i);
6939 					err = request_irq(sp->entries[i].vector,
6940 							  s2io_msix_ring_handle,
6941 							  0,
6942 							  sp->desc[i],
6943 							  sp->s2io_entries[i].arg);
6944 				} else if (sp->s2io_entries[i].type ==
6945 					   MSIX_ALARM_TYPE) {
6946 					snprintf(sp->desc[i],
6947 						sizeof(sp->desc[i]),
6948 						"%s:MSI-X-%d-TX",
6949 						dev->name, i);
6950 					err = request_irq(sp->entries[i].vector,
6951 							  s2io_msix_fifo_handle,
6952 							  0,
6953 							  sp->desc[i],
6954 							  sp->s2io_entries[i].arg);
6955 
6956 				}
				/* If either the data or the address is zero, print it. */
6958 				if (!(sp->msix_info[i].addr &&
6959 				      sp->msix_info[i].data)) {
6960 					DBG_PRINT(ERR_DBG,
6961 						  "%s @Addr:0x%llx Data:0x%llx\n",
6962 						  sp->desc[i],
6963 						  (unsigned long long)
6964 						  sp->msix_info[i].addr,
6965 						  (unsigned long long)
6966 						  ntohl(sp->msix_info[i].data));
6967 				} else
6968 					msix_rx_cnt++;
6969 				if (err) {
6970 					remove_msix_isr(sp);
6971 
6972 					DBG_PRINT(ERR_DBG,
6973 						  "%s:MSI-X-%d registration "
6974 						  "failed\n", dev->name, i);
6975 
6976 					DBG_PRINT(ERR_DBG,
6977 						  "%s: Defaulting to INTA\n",
6978 						  dev->name);
6979 					sp->config.intr_type = INTA;
6980 					break;
6981 				}
6982 				sp->s2io_entries[i].in_use =
6983 					MSIX_REGISTERED_SUCCESS;
6984 			}
6985 		}
6986 		if (!err) {
6987 			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
6988 			DBG_PRINT(INFO_DBG,
6989 				  "MSI-X-TX entries enabled through alarm vector\n");
6990 		}
6991 	}
6992 	if (sp->config.intr_type == INTA) {
6993 		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
6994 				  sp->name, dev);
6995 		if (err) {
6996 			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6997 				  dev->name);
6998 			return -1;
6999 		}
7000 	}
7001 	return 0;
7002 }
7003 
7004 static void s2io_rem_isr(struct s2io_nic *sp)
7005 {
7006 	if (sp->config.intr_type == MSI_X)
7007 		remove_msix_isr(sp);
7008 	else
7009 		remove_inta_isr(sp);
7010 }
7011 
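/*
 * Common teardown path for close, reset and error recovery. Stops the
 * alarm timer, NAPI and traffic, removes the ISRs, waits for the device
 * to become quiescent, then resets the NIC and frees all Tx/Rx buffers.
 * @do_io gates the register accesses so the PCI error path can tear
 * down without touching the hardware.
 */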
7012 static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7013 {
7014 	int cnt = 0;
7015 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
7016 	register u64 val64 = 0;
7017 	struct config_param *config;
7018 	config = &sp->config;
7019 
7020 	if (!is_s2io_card_up(sp))
7021 		return;
7022 
7023 	del_timer_sync(&sp->alarm_timer);
7024 	/* If s2io_set_link task is executing, wait till it completes. */
7025 	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7026 		msleep(50);
7027 	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7028 
7029 	/* Disable napi */
7030 	if (sp->config.napi) {
7031 		int off = 0;
		if (config->intr_type == MSI_X) {
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
		} else {
			napi_disable(&sp->napi);
		}
7038 	}
7039 
7040 	/* disable Tx and Rx traffic on the NIC */
7041 	if (do_io)
7042 		stop_nic(sp);
7043 
7044 	s2io_rem_isr(sp);
7045 
7046 	/* stop the tx queue, indicate link down */
7047 	s2io_link(sp, LINK_DOWN);
7048 
7049 	/* Check if the device is Quiescent and then Reset the NIC */
7050 	while (do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffers to avoid a ring bump. Since there is
		 * no intention of processing the Rx frames at this point,
		 * we just set the ownership bit of the RxDs in each Rx
		 * ring to HW and set the appropriate buffer sizes
		 * based on the ring mode.
		 */
7058 		rxd_owner_bit_reset(sp);
7059 
7060 		val64 = readq(&bar0->adapter_status);
7061 		if (verify_xena_quiescence(sp)) {
7062 			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7063 				break;
7064 		}
7065 
7066 		msleep(50);
7067 		cnt++;
7068 		if (cnt == 10) {
7069 			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7070 				  "adapter status reads 0x%llx\n",
7071 				  (unsigned long long)val64);
7072 			break;
7073 		}
7074 	}
7075 	if (do_io)
7076 		s2io_reset(sp);
7077 
7078 	/* Free all Tx buffers */
7079 	free_tx_buffers(sp);
7080 
7081 	/* Free all Rx buffers */
7082 	free_rx_buffers(sp);
7083 
7084 	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7085 }
7086 
7087 static void s2io_card_down(struct s2io_nic *sp)
7088 {
7089 	do_s2io_card_down(sp, 1);
7090 }
7091 
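/*
 * Bring-up path shared by open and the reset workers: initialize the
 * H/W, replenish every Rx ring, enable NAPI, restore the receive mode,
 * start the NIC, register the ISRs and finally enable interrupts.
 */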
7092 static int s2io_card_up(struct s2io_nic *sp)
7093 {
7094 	int i, ret = 0;
7095 	struct config_param *config;
7096 	struct mac_info *mac_control;
7097 	struct net_device *dev = sp->dev;
7098 	u16 interruptible;
7099 
7100 	/* Initialize the H/W I/O registers */
7101 	ret = init_nic(sp);
7102 	if (ret != 0) {
7103 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7104 			  dev->name);
7105 		if (ret != -EIO)
7106 			s2io_reset(sp);
7107 		return ret;
7108 	}
7109 
7110 	/*
7111 	 * Initializing the Rx buffers. For now we are considering only 1
7112 	 * Rx ring and initializing buffers into 30 Rx blocks
7113 	 */
7114 	config = &sp->config;
7115 	mac_control = &sp->mac_control;
7116 
7117 	for (i = 0; i < config->rx_ring_num; i++) {
7118 		struct ring_info *ring = &mac_control->rings[i];
7119 
7120 		ring->mtu = dev->mtu;
7121 		ring->lro = !!(dev->features & NETIF_F_LRO);
7122 		ret = fill_rx_buffers(sp, ring, 1);
7123 		if (ret) {
7124 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7125 				  dev->name);
7126 			s2io_reset(sp);
7127 			free_rx_buffers(sp);
7128 			return -ENOMEM;
7129 		}
7130 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7131 			  ring->rx_bufs_left);
7132 	}
7133 
7134 	/* Initialise napi */
7135 	if (config->napi) {
		if (config->intr_type == MSI_X) {
7137 			for (i = 0; i < sp->config.rx_ring_num; i++)
7138 				napi_enable(&sp->mac_control.rings[i].napi);
7139 		} else {
7140 			napi_enable(&sp->napi);
7141 		}
7142 	}
7143 
7144 	/* Maintain the state prior to the open */
7145 	if (sp->promisc_flg)
7146 		sp->promisc_flg = 0;
7147 	if (sp->m_cast_flg) {
7148 		sp->m_cast_flg = 0;
7149 		sp->all_multi_pos = 0;
7150 	}
7151 
7152 	/* Setting its receive mode */
7153 	s2io_set_multicast(dev);
7154 
7155 	if (dev->features & NETIF_F_LRO) {
7156 		/* Initialize max aggregatable pkts per session based on MTU */
7157 		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7158 		/* Check if we can use (if specified) user provided value */
7159 		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7160 			sp->lro_max_aggr_per_sess = lro_max_pkts;
7161 	}
7162 
7163 	/* Enable Rx Traffic and interrupts on the NIC */
7164 	if (start_nic(sp)) {
7165 		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7166 		s2io_reset(sp);
7167 		free_rx_buffers(sp);
7168 		return -ENODEV;
7169 	}
7170 
7171 	/* Add interrupt service routine */
7172 	if (s2io_add_isr(sp) != 0) {
7173 		if (sp->config.intr_type == MSI_X)
7174 			s2io_rem_isr(sp);
7175 		s2io_reset(sp);
7176 		free_rx_buffers(sp);
7177 		return -ENODEV;
7178 	}
7179 
7180 	timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
7181 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
7182 
7183 	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7184 
7185 	/*  Enable select interrupts */
7186 	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7187 	if (sp->config.intr_type != INTA) {
7188 		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7189 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7190 	} else {
7191 		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7192 		interruptible |= TX_PIC_INTR;
7193 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7194 	}
7195 
7196 	return 0;
7197 }
7198 
7199 /**
7200  * s2io_restart_nic - Resets the NIC.
 * @work: work queue entry containing the s2io_nic rst_timer_task
 * Description:
 * This function is scheduled to be run by the s2io_tx_watchdog
 * function after 0.5 secs to reset the NIC. The idea is to reduce
 * the run time of the watchdog routine which is run holding a
7206  * spin lock.
7207  */
7208 
7209 static void s2io_restart_nic(struct work_struct *work)
7210 {
7211 	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7212 	struct net_device *dev = sp->dev;
7213 
7214 	rtnl_lock();
7215 
7216 	if (!netif_running(dev))
7217 		goto out_unlock;
7218 
7219 	s2io_card_down(sp);
7220 	if (s2io_card_up(sp)) {
7221 		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7222 	}
7223 	s2io_wake_all_tx_queue(sp);
7224 	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7225 out_unlock:
7226 	rtnl_unlock();
7227 }
7228 
7229 /**
7230  *  s2io_tx_watchdog - Watchdog for transmit side.
7231  *  @dev : Pointer to net device structure
7232  *  Description:
7233  *  This function is triggered if the Tx Queue is stopped
7234  *  for a pre-defined amount of time when the Interface is still up.
7235  *  If the Interface is jammed in such a situation, the hardware is
7236  *  reset (by s2io_close) and restarted again (by s2io_open) to
7237  *  overcome any problem that might have been caused in the hardware.
7238  *  Return value:
7239  *  void
7240  */
7241 
7242 static void s2io_tx_watchdog(struct net_device *dev)
7243 {
7244 	struct s2io_nic *sp = netdev_priv(dev);
7245 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7246 
7247 	if (netif_carrier_ok(dev)) {
7248 		swstats->watchdog_timer_cnt++;
7249 		schedule_work(&sp->rst_timer_task);
7250 		swstats->soft_reset_cnt++;
7251 	}
7252 }
7253 
7254 /**
7255  *   rx_osm_handler - To perform some OS related operations on SKB.
 *   @ring_data: pointer to the ring_info of the ring this RxD belongs to.
 *   @rxdp: the RxD descriptor carrying the received frame.
 *   Description:
 *   This function is called by the Rx interrupt service routine to perform
 *   some OS related operations on the SKB before passing it to the upper
 *   layers. It mainly checks if the checksum is OK, and if so marks the
 *   SKB's checksum as already verified, increments the Rx packet count and
 *   passes the SKB to the upper layer. If the checksum is wrong, it
 *   increments the Rx packet error count, frees the SKB and returns error.
7268  *   Return value:
7269  *   SUCCESS on success and -1 on failure.
7270  */
7271 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7272 {
7273 	struct s2io_nic *sp = ring_data->nic;
7274 	struct net_device *dev = ring_data->dev;
7275 	struct sk_buff *skb = (struct sk_buff *)
7276 		((unsigned long)rxdp->Host_Control);
7277 	int ring_no = ring_data->ring_no;
7278 	u16 l3_csum, l4_csum;
7279 	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7280 	struct lro *uninitialized_var(lro);
7281 	u8 err_mask;
7282 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7283 
7284 	skb->dev = dev;
7285 
7286 	if (err) {
7287 		/* Check for parity error */
7288 		if (err & 0x1)
7289 			swstats->parity_err_cnt++;
7290 
7291 		err_mask = err >> 48;
7292 		switch (err_mask) {
7293 		case 1:
7294 			swstats->rx_parity_err_cnt++;
7295 			break;
7296 
7297 		case 2:
7298 			swstats->rx_abort_cnt++;
7299 			break;
7300 
7301 		case 3:
7302 			swstats->rx_parity_abort_cnt++;
7303 			break;
7304 
7305 		case 4:
7306 			swstats->rx_rda_fail_cnt++;
7307 			break;
7308 
7309 		case 5:
7310 			swstats->rx_unkn_prot_cnt++;
7311 			break;
7312 
7313 		case 6:
7314 			swstats->rx_fcs_err_cnt++;
7315 			break;
7316 
7317 		case 7:
7318 			swstats->rx_buf_size_err_cnt++;
7319 			break;
7320 
7321 		case 8:
7322 			swstats->rx_rxd_corrupt_cnt++;
7323 			break;
7324 
7325 		case 15:
7326 			swstats->rx_unkn_err_cnt++;
7327 			break;
7328 		}
7329 		/*
7330 		 * Drop the packet if bad transfer code. Exception being
7331 		 * 0x5, which could be due to unsupported IPv6 extension header.
7332 		 * In this case, we let stack handle the packet.
7333 		 * Note that in this case, since checksum will be incorrect,
7334 		 * stack will validate the same.
7335 		 */
7336 		if (err_mask != 0x5) {
7337 			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7338 				  dev->name, err_mask);
7339 			dev->stats.rx_crc_errors++;
7340 			swstats->mem_freed
7341 				+= skb->truesize;
7342 			dev_kfree_skb(skb);
7343 			ring_data->rx_bufs_left -= 1;
7344 			rxdp->Host_Control = 0;
7345 			return 0;
7346 		}
7347 	}
7348 
7349 	rxdp->Host_Control = 0;
7350 	if (sp->rxd_mode == RXD_MODE_1) {
7351 		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7352 
7353 		skb_put(skb, len);
7354 	} else if (sp->rxd_mode == RXD_MODE_3B) {
7355 		int get_block = ring_data->rx_curr_get_info.block_index;
7356 		int get_off = ring_data->rx_curr_get_info.offset;
7357 		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7358 		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7359 		unsigned char *buff = skb_push(skb, buf0_len);
7360 
7361 		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7362 		memcpy(buff, ba->ba_0, buf0_len);
7363 		skb_put(skb, buf2_len);
7364 	}
7365 
7366 	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7367 	    ((!ring_data->lro) ||
7368 	     (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
7369 	    (dev->features & NETIF_F_RXCSUM)) {
7370 		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7371 		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7372 		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7373 			/*
7374 			 * NIC verifies if the Checksum of the received
7375 			 * frame is Ok or not and accordingly returns
7376 			 * a flag in the RxD.
7377 			 */
7378 			skb->ip_summed = CHECKSUM_UNNECESSARY;
7379 			if (ring_data->lro) {
7380 				u32 tcp_len = 0;
7381 				u8 *tcp;
7382 				int ret = 0;
7383 
7384 				ret = s2io_club_tcp_session(ring_data,
7385 							    skb->data, &tcp,
7386 							    &tcp_len, &lro,
7387 							    rxdp, sp);
7388 				switch (ret) {
7389 				case 3: /* Begin anew */
7390 					lro->parent = skb;
7391 					goto aggregate;
7392 				case 1: /* Aggregate */
7393 					lro_append_pkt(sp, lro, skb, tcp_len);
7394 					goto aggregate;
7395 				case 4: /* Flush session */
7396 					lro_append_pkt(sp, lro, skb, tcp_len);
7397 					queue_rx_frame(lro->parent,
7398 						       lro->vlan_tag);
7399 					clear_lro_session(lro);
7400 					swstats->flush_max_pkts++;
7401 					goto aggregate;
7402 				case 2: /* Flush both */
7403 					lro->parent->data_len = lro->frags_len;
7404 					swstats->sending_both++;
7405 					queue_rx_frame(lro->parent,
7406 						       lro->vlan_tag);
7407 					clear_lro_session(lro);
7408 					goto send_up;
7409 				case 0: /* sessions exceeded */
7410 				case -1: /* non-TCP or not L2 aggregatable */
7411 				case 5: /*
7412 					 * First pkt in session not
7413 					 * L3/L4 aggregatable
7414 					 */
7415 					break;
7416 				default:
7417 					DBG_PRINT(ERR_DBG,
7418 						  "%s: Samadhana!!\n",
7419 						  __func__);
7420 					BUG();
7421 				}
7422 			}
7423 		} else {
7424 			/*
7425 			 * Packet with erroneous checksum, let the
7426 			 * upper layers deal with it.
7427 			 */
7428 			skb_checksum_none_assert(skb);
7429 		}
7430 	} else
7431 		skb_checksum_none_assert(skb);
7432 
7433 	swstats->mem_freed += skb->truesize;
7434 send_up:
7435 	skb_record_rx_queue(skb, ring_no);
7436 	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7437 aggregate:
7438 	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7439 	return SUCCESS;
7440 }
7441 
7442 /**
7443  *  s2io_link - stops/starts the Tx queue.
7444  *  @sp : private member of the device structure, which is a pointer to the
7445  *  s2io_nic structure.
 *  @link : indicates whether link is UP/DOWN.
 *  Description:
 *  This function stops/starts the Tx queue depending on whether the link
 *  status of the NIC is down or up. This is called by the Alarm
7450  *  interrupt handler whenever a link change interrupt comes up.
7451  *  Return value:
7452  *  void.
7453  */
7454 
7455 static void s2io_link(struct s2io_nic *sp, int link)
7456 {
7457 	struct net_device *dev = sp->dev;
7458 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7459 
7460 	if (link != sp->last_link_state) {
7461 		init_tti(sp, link);
7462 		if (link == LINK_DOWN) {
7463 			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7464 			s2io_stop_all_tx_queue(sp);
7465 			netif_carrier_off(dev);
7466 			if (swstats->link_up_cnt)
7467 				swstats->link_up_time =
7468 					jiffies - sp->start_time;
7469 			swstats->link_down_cnt++;
7470 		} else {
7471 			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7472 			if (swstats->link_down_cnt)
7473 				swstats->link_down_time =
7474 					jiffies - sp->start_time;
7475 			swstats->link_up_cnt++;
7476 			netif_carrier_on(dev);
7477 			s2io_wake_all_tx_queue(sp);
7478 		}
7479 	}
7480 	sp->last_link_state = link;
7481 	sp->start_time = jiffies;
7482 }
7483 
7484 /**
 *  s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
7486  *  @sp : private member of the device structure, which is a pointer to the
7487  *  s2io_nic structure.
7488  *  Description:
7489  *  This function initializes a few of the PCI and PCI-X configuration registers
7490  *  with recommended values.
7491  *  Return value:
7492  *  void
7493  */
7494 
7495 static void s2io_init_pci(struct s2io_nic *sp)
7496 {
7497 	u16 pci_cmd = 0, pcix_cmd = 0;
7498 
7499 	/* Enable Data Parity Error Recovery in PCI-X command register. */
7500 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7501 			     &(pcix_cmd));
7502 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7503 			      (pcix_cmd | 1));
7504 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7505 			     &(pcix_cmd));
7506 
7507 	/* Set the PErr Response bit in PCI command register. */
7508 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7509 	pci_write_config_word(sp->pdev, PCI_COMMAND,
7510 			      (pci_cmd | PCI_COMMAND_PARITY));
7511 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7512 }
7513 
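/*
 * Sanity-check the module parameters before probing, clamping anything
 * out of range (fifo/ring counts, steering mode, interrupt type, ring
 * mode and ring sizes) to supported values.
 */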
7514 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7515 			    u8 *dev_multiq)
7516 {
7517 	int i;
7518 
7519 	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7520 		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7521 			  "(%d) not supported\n", tx_fifo_num);
7522 
7523 		if (tx_fifo_num < 1)
7524 			tx_fifo_num = 1;
7525 		else
7526 			tx_fifo_num = MAX_TX_FIFOS;
7527 
7528 		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7529 	}
7530 
7531 	if (multiq)
7532 		*dev_multiq = multiq;
7533 
7534 	if (tx_steering_type && (1 == tx_fifo_num)) {
7535 		if (tx_steering_type != TX_DEFAULT_STEERING)
7536 			DBG_PRINT(ERR_DBG,
7537 				  "Tx steering is not supported with "
7538 				  "one fifo. Disabling Tx steering.\n");
7539 		tx_steering_type = NO_STEERING;
7540 	}
7541 
7542 	if ((tx_steering_type < NO_STEERING) ||
7543 	    (tx_steering_type > TX_DEFAULT_STEERING)) {
7544 		DBG_PRINT(ERR_DBG,
7545 			  "Requested transmit steering not supported\n");
7546 		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7547 		tx_steering_type = NO_STEERING;
7548 	}
7549 
7550 	if (rx_ring_num > MAX_RX_RINGS) {
7551 		DBG_PRINT(ERR_DBG,
7552 			  "Requested number of rx rings not supported\n");
7553 		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7554 			  MAX_RX_RINGS);
7555 		rx_ring_num = MAX_RX_RINGS;
7556 	}
7557 
7558 	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7559 		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7560 			  "Defaulting to INTA\n");
7561 		*dev_intr_type = INTA;
7562 	}
7563 
7564 	if ((*dev_intr_type == MSI_X) &&
7565 	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7566 	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7567 		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7568 			  "Defaulting to INTA\n");
7569 		*dev_intr_type = INTA;
7570 	}
7571 
7572 	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7573 		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7574 		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7575 		rx_ring_mode = 1;
7576 	}
7577 
7578 	for (i = 0; i < MAX_RX_RINGS; i++)
7579 		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7580 			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7581 				  "supported\nDefaulting to %d\n",
7582 				  MAX_RX_BLOCKS_PER_RING);
7583 			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7584 		}
7585 
7586 	return SUCCESS;
7587 }
7588 
7589 /**
7590  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7591  * or Traffic class respectively.
 * @nic: device private variable
 * @ds_codepoint: the DS codepoint (0-63) whose traffic is to be steered
 * @ring: the receive ring the traffic is steered to
 * Description: The function configures the receive steering to the
 * desired receive ring.
 * Return Value: SUCCESS on success and FAILURE on failure.
7597  */
7598 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7599 {
7600 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
7601 	register u64 val64 = 0;
7602 
7603 	if (ds_codepoint > 63)
7604 		return FAILURE;
7605 
7606 	val64 = RTS_DS_MEM_DATA(ring);
7607 	writeq(val64, &bar0->rts_ds_mem_data);
7608 
7609 	val64 = RTS_DS_MEM_CTRL_WE |
7610 		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7611 		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7612 
7613 	writeq(val64, &bar0->rts_ds_mem_ctrl);
7614 
7615 	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7616 				     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7617 				     S2IO_BIT_RESET);
7618 }
7619 
7620 static const struct net_device_ops s2io_netdev_ops = {
7621 	.ndo_open	        = s2io_open,
7622 	.ndo_stop	        = s2io_close,
7623 	.ndo_get_stats	        = s2io_get_stats,
7624 	.ndo_start_xmit    	= s2io_xmit,
7625 	.ndo_validate_addr	= eth_validate_addr,
7626 	.ndo_set_rx_mode	= s2io_set_multicast,
7627 	.ndo_do_ioctl	   	= s2io_ioctl,
7628 	.ndo_set_mac_address    = s2io_set_mac_addr,
7629 	.ndo_change_mtu	   	= s2io_change_mtu,
7630 	.ndo_set_features	= s2io_set_features,
7631 	.ndo_tx_timeout	   	= s2io_tx_watchdog,
7632 #ifdef CONFIG_NET_POLL_CONTROLLER
7633 	.ndo_poll_controller    = s2io_netpoll,
7634 #endif
7635 };
7636 
7637 /**
 *  s2io_init_nic - Initialization of the adapter.
 *  @pdev : structure containing the PCI related information of the device.
 *  @pre: the entry of the s2io_tbl device ID table that matched this device.
 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
 *  All OS related initialization, including allocation of the memory and
 *  device structures and initialization of the device private variables,
 *  is done. Also the swapper control register is initialized to enable
 *  read and write into the I/O registers of the device.
7647  *  Return value:
7648  *  returns 0 on success and negative on failure.
7649  */
7650 
7651 static int
7652 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7653 {
7654 	struct s2io_nic *sp;
7655 	struct net_device *dev;
7656 	int i, j, ret;
7657 	int dma_flag = false;
7658 	u32 mac_up, mac_down;
7659 	u64 val64 = 0, tmp64 = 0;
7660 	struct XENA_dev_config __iomem *bar0 = NULL;
7661 	u16 subid;
7662 	struct config_param *config;
7663 	struct mac_info *mac_control;
7664 	int mode;
7665 	u8 dev_intr_type = intr_type;
7666 	u8 dev_multiq = 0;
7667 
7668 	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7669 	if (ret)
7670 		return ret;
7671 
7672 	ret = pci_enable_device(pdev);
7673 	if (ret) {
7674 		DBG_PRINT(ERR_DBG,
7675 			  "%s: pci_enable_device failed\n", __func__);
7676 		return ret;
7677 	}
7678 
7679 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7680 		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7681 		dma_flag = true;
7682 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
7683 			DBG_PRINT(ERR_DBG,
7684 				  "Unable to obtain 64bit DMA "
7685 				  "for consistent allocations\n");
7686 			pci_disable_device(pdev);
7687 			return -ENOMEM;
7688 		}
7689 	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
7690 		DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7691 	} else {
7692 		pci_disable_device(pdev);
7693 		return -ENOMEM;
7694 	}
7695 	ret = pci_request_regions(pdev, s2io_driver_name);
7696 	if (ret) {
7697 		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7698 			  __func__, ret);
7699 		pci_disable_device(pdev);
7700 		return -ENODEV;
7701 	}
7702 	if (dev_multiq)
7703 		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7704 	else
7705 		dev = alloc_etherdev(sizeof(struct s2io_nic));
7706 	if (dev == NULL) {
7707 		pci_disable_device(pdev);
7708 		pci_release_regions(pdev);
7709 		return -ENODEV;
7710 	}
7711 
7712 	pci_set_master(pdev);
7713 	pci_set_drvdata(pdev, dev);
7714 	SET_NETDEV_DEV(dev, &pdev->dev);
7715 
7716 	/*  Private member variable initialized to s2io NIC structure */
7717 	sp = netdev_priv(dev);
7718 	sp->dev = dev;
7719 	sp->pdev = pdev;
7720 	sp->high_dma_flag = dma_flag;
7721 	sp->device_enabled_once = false;
7722 	if (rx_ring_mode == 1)
7723 		sp->rxd_mode = RXD_MODE_1;
7724 	if (rx_ring_mode == 2)
7725 		sp->rxd_mode = RXD_MODE_3B;
7726 
7727 	sp->config.intr_type = dev_intr_type;
7728 
7729 	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7730 	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7731 		sp->device_type = XFRAME_II_DEVICE;
7732 	else
		sp->device_type = XFRAME_I_DEVICE;

7736 	/* Initialize some PCI/PCI-X fields of the NIC. */
7737 	s2io_init_pci(sp);
7738 
7739 	/*
7740 	 * Setting the device configuration parameters.
7741 	 * Most of these parameters can be specified by the user during
7742 	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
7744 	 * are initialized with default values.
7745 	 */
7746 	config = &sp->config;
7747 	mac_control = &sp->mac_control;
7748 
7749 	config->napi = napi;
7750 	config->tx_steering_type = tx_steering_type;
7751 
7752 	/* Tx side parameters. */
7753 	if (config->tx_steering_type == TX_PRIORITY_STEERING)
7754 		config->tx_fifo_num = MAX_TX_FIFOS;
7755 	else
7756 		config->tx_fifo_num = tx_fifo_num;
7757 
7758 	/* Initialize the fifos used for tx steering */
7759 	if (config->tx_fifo_num < 5) {
7760 		if (config->tx_fifo_num  == 1)
7761 			sp->total_tcp_fifos = 1;
7762 		else
7763 			sp->total_tcp_fifos = config->tx_fifo_num - 1;
7764 		sp->udp_fifo_idx = config->tx_fifo_num - 1;
7765 		sp->total_udp_fifos = 1;
7766 		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7767 	} else {
7768 		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7769 				       FIFO_OTHER_MAX_NUM);
7770 		sp->udp_fifo_idx = sp->total_tcp_fifos;
7771 		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7772 		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7773 	}
7774 
7775 	config->multiq = dev_multiq;
7776 	for (i = 0; i < config->tx_fifo_num; i++) {
7777 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7778 
7779 		tx_cfg->fifo_len = tx_fifo_len[i];
7780 		tx_cfg->fifo_priority = i;
7781 	}
7782 
7783 	/* mapping the QoS priority to the configured fifos */
7784 	for (i = 0; i < MAX_TX_FIFOS; i++)
7785 		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7786 
7787 	/* map the hashing selector table to the configured fifos */
7788 	for (i = 0; i < config->tx_fifo_num; i++)
		sp->fifo_selector[i] = fifo_selector[i];

7792 	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7793 	for (i = 0; i < config->tx_fifo_num; i++) {
7794 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7795 
7796 		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7797 		if (tx_cfg->fifo_len < 65) {
7798 			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7799 			break;
7800 		}
7801 	}
7802 	/* + 2 because one Txd for skb->data and one Txd for UFO */
7803 	config->max_txds = MAX_SKB_FRAGS + 2;
7804 
7805 	/* Rx side parameters. */
7806 	config->rx_ring_num = rx_ring_num;
7807 	for (i = 0; i < config->rx_ring_num; i++) {
7808 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7809 		struct ring_info *ring = &mac_control->rings[i];
7810 
7811 		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7812 		rx_cfg->ring_priority = i;
7813 		ring->rx_bufs_left = 0;
7814 		ring->rxd_mode = sp->rxd_mode;
7815 		ring->rxd_count = rxd_count[sp->rxd_mode];
7816 		ring->pdev = sp->pdev;
7817 		ring->dev = sp->dev;
7818 	}
7819 
7820 	for (i = 0; i < rx_ring_num; i++) {
7821 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7822 
7823 		rx_cfg->ring_org = RING_ORG_BUFF1;
7824 		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7825 	}
7826 
7827 	/*  Setting Mac Control parameters */
7828 	mac_control->rmac_pause_time = rmac_pause_time;
7829 	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;

7833 	/*  initialize the shared memory used by the NIC and the host */
7834 	if (init_shared_mem(sp)) {
7835 		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7836 		ret = -ENOMEM;
7837 		goto mem_alloc_failed;
7838 	}
7839 
7840 	sp->bar0 = pci_ioremap_bar(pdev, 0);
7841 	if (!sp->bar0) {
7842 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7843 			  dev->name);
7844 		ret = -ENOMEM;
7845 		goto bar0_remap_failed;
7846 	}
7847 
7848 	sp->bar1 = pci_ioremap_bar(pdev, 2);
7849 	if (!sp->bar1) {
7850 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7851 			  dev->name);
7852 		ret = -ENOMEM;
7853 		goto bar1_remap_failed;
7854 	}
7855 
7856 	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++)
		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7860 
7861 	/*  Driver entry points */
7862 	dev->netdev_ops = &s2io_netdev_ops;
7863 	dev->ethtool_ops = &netdev_ethtool_ops;
7864 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7865 		NETIF_F_TSO | NETIF_F_TSO6 |
7866 		NETIF_F_RXCSUM | NETIF_F_LRO;
7867 	dev->features |= dev->hw_features |
7868 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (sp->high_dma_flag)
7870 		dev->features |= NETIF_F_HIGHDMA;
7871 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7872 	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7873 	INIT_WORK(&sp->set_link_task, s2io_set_link);
7874 
7875 	pci_save_state(sp->pdev);
7876 
7877 	/* Setting swapper control on the NIC, for proper reset operation */
7878 	if (s2io_set_swapper(sp)) {
7879 		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7880 			  dev->name);
7881 		ret = -EAGAIN;
7882 		goto set_swap_failed;
7883 	}
7884 
	/* Verify if the Herc works in the slot it's placed into */
7886 	if (sp->device_type & XFRAME_II_DEVICE) {
7887 		mode = s2io_verify_pci_mode(sp);
7888 		if (mode < 0) {
7889 			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7890 				  __func__);
7891 			ret = -EBADSLT;
7892 			goto set_swap_failed;
7893 		}
7894 	}
7895 
7896 	if (sp->config.intr_type == MSI_X) {
7897 		sp->num_entries = config->rx_ring_num + 1;
7898 		ret = s2io_enable_msi_x(sp);
7899 
7900 		if (!ret) {
7901 			ret = s2io_test_msi(sp);
7902 			/* rollback MSI-X, will re-enable during add_isr() */
7903 			remove_msix_isr(sp);
7904 		}
		if (ret) {
			DBG_PRINT(ERR_DBG,
7908 				  "MSI-X requested but failed to enable\n");
7909 			sp->config.intr_type = INTA;
7910 		}
7911 	}
7912 
	if (config->intr_type == MSI_X) {
7914 		for (i = 0; i < config->rx_ring_num ; i++) {
7915 			struct ring_info *ring = &mac_control->rings[i];
7916 
7917 			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7918 		}
7919 	} else {
7920 		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7921 	}
7922 
7923 	/* Not needed for Herc */
7924 	if (sp->device_type & XFRAME_I_DEVICE) {
7925 		/*
7926 		 * Fix for all "FFs" MAC address problems observed on
7927 		 * Alpha platforms
7928 		 */
7929 		fix_mac_address(sp);
7930 		s2io_reset(sp);
7931 	}
7932 
7933 	/*
7934 	 * MAC address initialization.
7935 	 * For now only one mac address will be read and used.
7936 	 */
7937 	bar0 = sp->bar0;
7938 	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7939 		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7940 	writeq(val64, &bar0->rmac_addr_cmd_mem);
7941 	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7942 			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
7943 			      S2IO_BIT_RESET);
7944 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
7945 	mac_down = (u32)tmp64;
7946 	mac_up = (u32) (tmp64 >> 32);
7947 
7948 	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7949 	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7950 	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7951 	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7952 	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7953 	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7954 
7955 	/*  Set the factory defined MAC address initially   */
7956 	dev->addr_len = ETH_ALEN;
7957 	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7958 
7959 	/* initialize number of multicast & unicast MAC entries variables */
7960 	if (sp->device_type == XFRAME_I_DEVICE) {
7961 		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7962 		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7963 		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7964 	} else if (sp->device_type == XFRAME_II_DEVICE) {
7965 		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7966 		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7967 		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7968 	}
7969 
7970 	/* MTU range: 46 - 9600 */
7971 	dev->min_mtu = MIN_MTU;
7972 	dev->max_mtu = S2IO_JUMBO_SIZE;
7973 
7974 	/* store mac addresses from CAM to s2io_nic structure */
7975 	do_s2io_store_unicast_mc(sp);
7976 
	/* Configure MSI-X vectors for the number of rings configured plus one */
7978 	if ((sp->device_type == XFRAME_II_DEVICE) &&
7979 	    (config->intr_type == MSI_X))
7980 		sp->num_entries = config->rx_ring_num + 1;
7981 
	/* Store the values of the MSI-X table in the s2io_nic structure */
7983 	store_xmsi_data(sp);
	/* Reset the NIC and bring it to a known state */
7985 	s2io_reset(sp);
7986 
7987 	/*
7988 	 * Initialize link state flags
7989 	 * and the card state parameter
7990 	 */
7991 	sp->state = 0;
7992 
7993 	/* Initialize spinlocks */
7994 	for (i = 0; i < sp->config.tx_fifo_num; i++) {
7995 		struct fifo_info *fifo = &mac_control->fifos[i];
7996 
7997 		spin_lock_init(&fifo->tx_lock);
7998 	}
7999 
8000 	/*
8001 	 * SXE-002: Configure link and activity LED to init state
8002 	 * on driver load.
8003 	 */
8004 	subid = sp->pdev->subsystem_device;
8005 	if ((subid & 0xFF) >= 0x07) {
8006 		val64 = readq(&bar0->gpio_control);
8007 		val64 |= 0x0000800000000000ULL;
8008 		writeq(val64, &bar0->gpio_control);
8009 		val64 = 0x0411040400000000ULL;
8010 		writeq(val64, (void __iomem *)bar0 + 0x2700);
8011 		val64 = readq(&bar0->gpio_control);
8012 	}
8013 
8014 	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
8015 
8016 	if (register_netdev(dev)) {
8017 		DBG_PRINT(ERR_DBG, "Device registration failed\n");
8018 		ret = -ENODEV;
8019 		goto register_failed;
8020 	}
8021 	s2io_vpd_read(sp);
8022 	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8023 	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8024 		  sp->product_name, pdev->revision);
8025 	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8026 		  s2io_driver_version);
8027 	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8028 	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8029 	if (sp->device_type & XFRAME_II_DEVICE) {
8030 		mode = s2io_print_pci_mode(sp);
8031 		if (mode < 0) {
8032 			ret = -EBADSLT;
8033 			unregister_netdev(dev);
8034 			goto set_swap_failed;
8035 		}
8036 	}
8037 	switch (sp->rxd_mode) {
8038 	case RXD_MODE_1:
8039 		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8040 			  dev->name);
8041 		break;
8042 	case RXD_MODE_3B:
8043 		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8044 			  dev->name);
8045 		break;
8046 	}
8047 
8048 	switch (sp->config.napi) {
8049 	case 0:
8050 		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8051 		break;
8052 	case 1:
8053 		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8054 		break;
8055 	}
8056 
8057 	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8058 		  sp->config.tx_fifo_num);
8059 
8060 	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8061 		  sp->config.rx_ring_num);
8062 
8063 	switch (sp->config.intr_type) {
8064 	case INTA:
8065 		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8066 		break;
8067 	case MSI_X:
8068 		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8069 		break;
8070 	}
8071 	if (sp->config.multiq) {
8072 		for (i = 0; i < sp->config.tx_fifo_num; i++) {
8073 			struct fifo_info *fifo = &mac_control->fifos[i];
8074 
8075 			fifo->multiq = config->multiq;
8076 		}
8077 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8078 			  dev->name);
8079 	} else
8080 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8081 			  dev->name);
8082 
8083 	switch (sp->config.tx_steering_type) {
8084 	case NO_STEERING:
8085 		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8086 			  dev->name);
8087 		break;
8088 	case TX_PRIORITY_STEERING:
8089 		DBG_PRINT(ERR_DBG,
8090 			  "%s: Priority steering enabled for transmit\n",
8091 			  dev->name);
8092 		break;
8093 	case TX_DEFAULT_STEERING:
8094 		DBG_PRINT(ERR_DBG,
8095 			  "%s: Default steering enabled for transmit\n",
8096 			  dev->name);
8097 	}
8098 
8099 	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8100 		  dev->name);
8101 	/* Initialize device name */
8102 	snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8103 		 sp->product_name);
8104 
8105 	if (vlan_tag_strip)
8106 		sp->vlan_strip_flag = 1;
8107 	else
8108 		sp->vlan_strip_flag = 0;
8109 
8110 	/*
8111 	 * Make Link state as off at this point, when the Link change
8112 	 * interrupt comes the state will be automatically changed to
8113 	 * the right state.
8114 	 */
8115 	netif_carrier_off(dev);
8116 
8117 	return 0;
8118 
8119 register_failed:
8120 set_swap_failed:
8121 	iounmap(sp->bar1);
8122 bar1_remap_failed:
8123 	iounmap(sp->bar0);
8124 bar0_remap_failed:
8125 mem_alloc_failed:
8126 	free_shared_mem(sp);
8127 	pci_disable_device(pdev);
8128 	pci_release_regions(pdev);
8129 	free_netdev(dev);
8130 
8131 	return ret;
8132 }
8133 
8134 /**
8135  * s2io_rem_nic - Free the PCI device
8136  * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
8139  * be in response to a Hot plug event or when the driver is to be removed
8140  * from memory.
8141  */
8142 
8143 static void s2io_rem_nic(struct pci_dev *pdev)
8144 {
8145 	struct net_device *dev = pci_get_drvdata(pdev);
8146 	struct s2io_nic *sp;
8147 
8148 	if (dev == NULL) {
8149 		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8150 		return;
8151 	}
8152 
8153 	sp = netdev_priv(dev);
8154 
8155 	cancel_work_sync(&sp->rst_timer_task);
8156 	cancel_work_sync(&sp->set_link_task);
8157 
8158 	unregister_netdev(dev);
8159 
8160 	free_shared_mem(sp);
8161 	iounmap(sp->bar0);
8162 	iounmap(sp->bar1);
8163 	pci_release_regions(pdev);
8164 	free_netdev(dev);
8165 	pci_disable_device(pdev);
8166 }
8167 
8168 module_pci_driver(s2io_driver);
8169 
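/*
 * Check whether a received frame is a candidate for LRO at L2: it must
 * be TCP over a DIX (Ethernet II) encapsulation, optionally VLAN tagged.
 * On success the IP and TCP header pointers are returned through @ip
 * and @tcp.
 */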
8170 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8171 				struct tcphdr **tcp, struct RxD_t *rxdp,
8172 				struct s2io_nic *sp)
8173 {
8174 	int ip_off;
8175 	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8176 
8177 	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8178 		DBG_PRINT(INIT_DBG,
8179 			  "%s: Non-TCP frames not supported for LRO\n",
8180 			  __func__);
8181 		return -1;
8182 	}
8183 
8184 	/* Checking for DIX type or DIX type with VLAN */
8185 	if ((l2_type == 0) || (l2_type == 4)) {
8186 		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8187 		/*
8188 		 * If vlan stripping is disabled and the frame is VLAN tagged,
8189 		 * shift the offset by the VLAN header size bytes.
8190 		 */
8191 		if ((!sp->vlan_strip_flag) &&
8192 		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8193 			ip_off += HEADER_VLAN_SIZE;
8194 	} else {
8195 		/* LLC, SNAP etc are considered non-mergeable */
8196 		return -1;
8197 	}
8198 
8199 	*ip = (struct iphdr *)(buffer + ip_off);
8200 	ip_len = (u8)((*ip)->ihl);
8201 	ip_len <<= 2;
8202 	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8203 
8204 	return 0;
8205 }
8206 
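/*
 * Two packets belong to the same LRO session only if both the IP
 * address pair and the TCP port pair match.
 */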
8207 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8208 				  struct tcphdr *tcp)
8209 {
8210 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8211 	if ((lro->iph->saddr != ip->saddr) ||
8212 	    (lro->iph->daddr != ip->daddr) ||
8213 	    (lro->tcph->source != tcp->source) ||
8214 	    (lro->tcph->dest != tcp->dest))
8215 		return -1;
8216 	return 0;
8217 }
8218 
8219 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8220 {
8221 	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8222 }
8223 
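/*
 * Seed a new LRO session from its first packet: record the header
 * pointers, the expected next sequence number and, when a lone TCP
 * timestamp option is present (doff == 8), its tsval/tsecr values.
 */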
8224 static void initiate_new_session(struct lro *lro, u8 *l2h,
8225 				 struct iphdr *ip, struct tcphdr *tcp,
8226 				 u32 tcp_pyld_len, u16 vlan_tag)
8227 {
8228 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8229 	lro->l2h = l2h;
8230 	lro->iph = ip;
8231 	lro->tcph = tcp;
8232 	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8233 	lro->tcp_ack = tcp->ack_seq;
8234 	lro->sg_num = 1;
8235 	lro->total_len = ntohs(ip->tot_len);
8236 	lro->frags_len = 0;
8237 	lro->vlan_tag = vlan_tag;
8238 	/*
8239 	 * Check if we saw TCP timestamp.
8240 	 * Other consistency checks have already been done.
8241 	 */
8242 	if (tcp->doff == 8) {
8243 		__be32 *ptr;
8244 		ptr = (__be32 *)(tcp+1);
8245 		lro->saw_ts = 1;
8246 		lro->cur_tsval = ntohl(*(ptr+1));
8247 		lro->cur_tsecr = *(ptr+2);
8248 	}
8249 	lro->in_use = 1;
8250 }
8251 
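/*
 * Rewrite the headers of the aggregated super-frame before it is sent
 * up the stack: patch the IP total length (fixing the checksum
 * incrementally), the latest TCP ack/window and, if timestamps were
 * seen, the timestamp echo reply. Also updates the aggregation stats.
 */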
8252 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8253 {
8254 	struct iphdr *ip = lro->iph;
8255 	struct tcphdr *tcp = lro->tcph;
8256 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8257 
8258 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8259 
8260 	/* Update L3 header */
8261 	csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8262 	ip->tot_len = htons(lro->total_len);
8263 
8264 	/* Update L4 header */
8265 	tcp->ack_seq = lro->tcp_ack;
8266 	tcp->window = lro->window;
8267 
8268 	/* Update tsecr field if this session has timestamps enabled */
8269 	if (lro->saw_ts) {
8270 		__be32 *ptr = (__be32 *)(tcp + 1);
8271 		*(ptr+2) = lro->cur_tsecr;
8272 	}
8273 
8274 	/* Update counters required for calculation of
8275 	 * average no. of packets aggregated.
8276 	 */
8277 	swstats->sum_avg_pkts_aggregated += lro->sg_num;
8278 	swstats->num_aggregations++;
8279 }
8280 
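/*
 * Fold one in-order segment into an existing LRO session: grow the
 * length counters, advance the expected sequence number and refresh
 * the ack, window and timestamp state from this packet.
 */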
8281 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8282 			     struct tcphdr *tcp, u32 l4_pyld)
8283 {
8284 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8285 	lro->total_len += l4_pyld;
8286 	lro->frags_len += l4_pyld;
8287 	lro->tcp_next_seq += l4_pyld;
8288 	lro->sg_num++;
8289 
	/* Update ack seq no. and window advertisement (from this pkt) in LRO object */
8291 	lro->tcp_ack = tcp->ack_seq;
8292 	lro->window = tcp->window;
8293 
8294 	if (lro->saw_ts) {
8295 		__be32 *ptr;
8296 		/* Update tsecr and tsval from this packet */
8297 		ptr = (__be32 *)(tcp+1);
8298 		lro->cur_tsval = ntohl(*(ptr+1));
8299 		lro->cur_tsecr = *(ptr + 2);
8300 	}
8301 }
8302 
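/*
 * L3/L4 aggregation checks: reject pure ACKs, IP options, ECN CE marked
 * packets, any TCP flag other than ACK, and any TCP option other than a
 * single timestamp (whose tsval must not go backwards and whose echo
 * reply must be non-zero).
 */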
8303 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8304 				    struct tcphdr *tcp, u32 tcp_pyld_len)
8305 {
8306 	u8 *ptr;
8307 
8308 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8309 
8310 	if (!tcp_pyld_len) {
8311 		/* Runt frame or a pure ack */
8312 		return -1;
8313 	}
8314 
8315 	if (ip->ihl != 5) /* IP has options */
8316 		return -1;
8317 
8318 	/* If we see CE codepoint in IP header, packet is not mergeable */
8319 	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8320 		return -1;
8321 
8322 	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8323 	if (tcp->urg || tcp->psh || tcp->rst ||
8324 	    tcp->syn || tcp->fin ||
8325 	    tcp->ece || tcp->cwr || !tcp->ack) {
8326 		/*
		 * We currently recognize only the ACK control bit; any
		 * other control flag being set results in flushing
		 * the LRO session.
8330 		 */
8331 		return -1;
8332 	}
8333 
8334 	/*
8335 	 * Allow only one TCP timestamp option. Don't aggregate if
8336 	 * any other options are detected.
8337 	 */
8338 	if (tcp->doff != 5 && tcp->doff != 8)
8339 		return -1;
8340 
8341 	if (tcp->doff == 8) {
8342 		ptr = (u8 *)(tcp + 1);
8343 		while (*ptr == TCPOPT_NOP)
8344 			ptr++;
8345 		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8346 			return -1;
8347 
8348 		/* Ensure timestamp value increases monotonically */
8349 		if (l_lro)
8350 			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8351 				return -1;
8352 
8353 		/* timestamp echo reply should be non-zero */
8354 		if (*((__be32 *)(ptr+6)) == 0)
8355 			return -1;
8356 	}
8357 
8358 	return 0;
8359 }
8360 
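/*
 * Core LRO classification. Match the frame in @buffer against the
 * per-ring session table and tell the caller what to do with it:
 *   -1 - non-TCP or not L2 aggregatable, send up as-is
 *    0 - all sessions in use, send up as-is
 *    1 - in-order match, aggregate into *lro
 *    2 - out-of-sequence or unmergeable match, flush session and packet
 *    3 - no match, a free session was claimed, begin anew
 *    4 - aggregated and session limit reached, flush the session
 *    5 - first packet of a would-be session not L3/L4 aggregatable
 */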
8361 static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
8362 				 u8 **tcp, u32 *tcp_len, struct lro **lro,
8363 				 struct RxD_t *rxdp, struct s2io_nic *sp)
8364 {
8365 	struct iphdr *ip;
8366 	struct tcphdr *tcph;
8367 	int ret = 0, i;
8368 	u16 vlan_tag = 0;
8369 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8370 
8371 	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8372 				   rxdp, sp);
8373 	if (ret)
8374 		return ret;
8375 
8376 	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
8377 
8378 	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8379 	tcph = (struct tcphdr *)*tcp;
8380 	*tcp_len = get_l4_pyld_length(ip, tcph);
8381 	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8382 		struct lro *l_lro = &ring_data->lro0_n[i];
8383 		if (l_lro->in_use) {
8384 			if (check_for_socket_match(l_lro, ip, tcph))
8385 				continue;
8386 			/* Sock pair matched */
8387 			*lro = l_lro;
8388 
8389 			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8390 				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
8391 					  "expected 0x%x, actual 0x%x\n",
8392 					  __func__,
8393 					  (*lro)->tcp_next_seq,
8394 					  ntohl(tcph->seq));
8395 
8396 				swstats->outof_sequence_pkts++;
8397 				ret = 2;
8398 				break;
8399 			}
8400 
8401 			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8402 						      *tcp_len))
8403 				ret = 1; /* Aggregate */
8404 			else
8405 				ret = 2; /* Flush both */
8406 			break;
8407 		}
8408 	}
8409 
8410 	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not,
		 * don't create a new LRO session. Just send this
		 * packet up.
		 */
8416 		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
8417 			return 5;
8418 
8419 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8420 			struct lro *l_lro = &ring_data->lro0_n[i];
8421 			if (!(l_lro->in_use)) {
8422 				*lro = l_lro;
8423 				ret = 3; /* Begin anew */
8424 				break;
8425 			}
8426 		}
8427 	}
8428 
8429 	if (ret == 0) { /* sessions exceeded */
8430 		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8431 			  __func__);
8432 		*lro = NULL;
8433 		return ret;
8434 	}
8435 
8436 	switch (ret) {
8437 	case 3:
8438 		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8439 				     vlan_tag);
8440 		break;
8441 	case 2:
8442 		update_L3L4_header(sp, *lro);
8443 		break;
8444 	case 1:
8445 		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8446 		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8447 			update_L3L4_header(sp, *lro);
8448 			ret = 4; /* Flush the LRO */
8449 		}
8450 		break;
8451 	default:
8452 		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
8453 		break;
8454 	}
8455 
8456 	return ret;
8457 }
8458 
8459 static void clear_lro_session(struct lro *lro)
8460 {
	memset(lro, 0, sizeof(struct lro));
8464 }
8465 
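/*
 * Hand a completed frame to the stack, restoring the stripped VLAN tag
 * when applicable, via netif_receive_skb() in NAPI mode or netif_rx()
 * otherwise.
 */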
8466 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8467 {
8468 	struct net_device *dev = skb->dev;
8469 	struct s2io_nic *sp = netdev_priv(dev);
8470 
8471 	skb->protocol = eth_type_trans(skb, dev);
8472 	if (vlan_tag && sp->vlan_strip_flag)
8473 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8474 	if (sp->config.napi)
8475 		netif_receive_skb(skb);
8476 	else
8477 		netif_rx(skb);
8478 }
8479 
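/*
 * Chain the TCP payload of @skb onto the parent SKB's frag_list,
 * pulling the headers first so that only @tcp_len payload bytes are
 * appended, and update the parent's length accounting.
 */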
8480 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8481 			   struct sk_buff *skb, u32 tcp_len)
8482 {
8483 	struct sk_buff *first = lro->parent;
8484 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8485 
8486 	first->len += tcp_len;
8487 	first->data_len = lro->frags_len;
8488 	skb_pull(skb, (skb->len - tcp_len));
8489 	if (skb_shinfo(first)->frag_list)
8490 		lro->last_frag->next = skb;
8491 	else
8492 		skb_shinfo(first)->frag_list = skb;
8493 	first->truesize += skb->truesize;
8494 	lro->last_frag = skb;
8495 	swstats->clubbed_frms_cnt++;
8496 }
8497 
8498 /**
8499  * s2io_io_error_detected - called when PCI error is detected
8500  * @pdev: Pointer to PCI device
8501  * @state: The current pci connection state
8502  *
8503  * This function is called after a PCI bus error affecting
8504  * this device has been detected.
8505  */
8506 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8507 					       pci_channel_state_t state)
8508 {
8509 	struct net_device *netdev = pci_get_drvdata(pdev);
8510 	struct s2io_nic *sp = netdev_priv(netdev);
8511 
8512 	netif_device_detach(netdev);
8513 
8514 	if (state == pci_channel_io_perm_failure)
8515 		return PCI_ERS_RESULT_DISCONNECT;
8516 
8517 	if (netif_running(netdev)) {
8518 		/* Bring down the card, while avoiding PCI I/O */
8519 		do_s2io_card_down(sp, 0);
8520 	}
8521 	pci_disable_device(pdev);
8522 
8523 	return PCI_ERS_RESULT_NEED_RESET;
8524 }
8525 
8526 /**
8527  * s2io_io_slot_reset - called after the pci bus has been reset.
8528  * @pdev: Pointer to PCI device
8529  *
8530  * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
8532  * followed by fixups by BIOS, and has its config space
8533  * set up identically to what it was at cold boot.
8534  */
8535 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8536 {
8537 	struct net_device *netdev = pci_get_drvdata(pdev);
8538 	struct s2io_nic *sp = netdev_priv(netdev);
8539 
8540 	if (pci_enable_device(pdev)) {
8541 		pr_err("Cannot re-enable PCI device after reset.\n");
8542 		return PCI_ERS_RESULT_DISCONNECT;
8543 	}
8544 
8545 	pci_set_master(pdev);
8546 	s2io_reset(sp);
8547 
8548 	return PCI_ERS_RESULT_RECOVERED;
8549 }
8550 
8551 /**
8552  * s2io_io_resume - called when traffic can start flowing again.
8553  * @pdev: Pointer to PCI device
8554  *
8555  * This callback is called when the error recovery driver tells
 * us that it is OK to resume normal operation.
8557  */
8558 static void s2io_io_resume(struct pci_dev *pdev)
8559 {
8560 	struct net_device *netdev = pci_get_drvdata(pdev);
8561 	struct s2io_nic *sp = netdev_priv(netdev);
8562 
8563 	if (netif_running(netdev)) {
8564 		if (s2io_card_up(sp)) {
8565 			pr_err("Can't bring device back up after reset.\n");
8566 			return;
8567 		}
8568 
8569 		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8570 			s2io_card_down(sp);
8571 			pr_err("Can't restore mac addr after reset.\n");
8572 			return;
8573 		}
8574 	}
8575 
8576 	netif_device_attach(netdev);
8577 	netif_tx_wake_all_queues(netdev);
8578 }
8579