1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2010 Exar Corp.
4  *
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik		: For pointing out the improper error condition
15  *			  check in the s2io_xmit routine and also some
16  *			  issues in the Tx watch dog function. Also for
17  *			  patiently answering all those innumerable
18  *			  questions regaring the 2.6 porting issues.
19  * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
20  *			  macros available only in 2.6 Kernel.
21  * Francois Romieu	: For pointing out all code part that were
22  *			  deprecated and also styling related comments.
23  * Grant Grundler	: For helping me get rid of some Architecture
24  *			  dependent code.
25  * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *		values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_max_pkts: This parameter defines maximum number of packets can be
42  *     aggregated as a single large packet
43  * napi: This parameter used to enable/disable NAPI (polling Rx)
44  *     Possible values '1' for enable and '0' for disable. Default is '1'
45  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
46  *                 Possible values '1' for enable , '0' for disable.
47  *                 Default is '2' - which means disable in promisc mode
48  *                 and enable in non-promiscuous mode.
49  * multiq: This parameter used to enable/disable MULTIQUEUE support.
50  *      Possible values '1' for enable and '0' for disable. Default is '0'
51  ************************************************************************/
52 
53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
54 
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/mdio.h>
65 #include <linux/skbuff.h>
66 #include <linux/init.h>
67 #include <linux/delay.h>
68 #include <linux/stddef.h>
69 #include <linux/ioctl.h>
70 #include <linux/timex.h>
71 #include <linux/ethtool.h>
72 #include <linux/workqueue.h>
73 #include <linux/if_vlan.h>
74 #include <linux/ip.h>
75 #include <linux/tcp.h>
76 #include <linux/uaccess.h>
77 #include <linux/io.h>
78 #include <linux/slab.h>
79 #include <linux/prefetch.h>
80 #include <net/tcp.h>
81 #include <net/checksum.h>
82 
83 #include <asm/div64.h>
84 #include <asm/irq.h>
85 
86 /* local include */
87 #include "s2io.h"
88 #include "s2io-regs.h"
89 
90 #define DRV_VERSION "2.0.26.28"
91 
92 /* S2io Driver name & version. */
93 static const char s2io_driver_name[] = "Neterion";
94 static const char s2io_driver_version[] = DRV_VERSION;
95 
96 static const int rxd_size[2] = {32, 48};
97 static const int rxd_count[2] = {127, 85};
98 
99 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
100 {
101 	int ret;
102 
103 	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
104 	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
105 
106 	return ret;
107 }
108 
109 /*
110  * Cards with following subsystem_id have a link state indication
111  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
112  * macro below identifies these cards given the subsystem_id.
113  */
114 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
115 	(dev_type == XFRAME_I_DEVICE) ?					\
116 	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
117 	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
118 
119 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
120 				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
121 
122 static inline int is_s2io_card_up(const struct s2io_nic *sp)
123 {
124 	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
125 }
126 
127 /* Ethtool related variables and Macros. */
128 static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
129 	"Register test\t(offline)",
130 	"Eeprom test\t(offline)",
131 	"Link test\t(online)",
132 	"RLDRAM test\t(offline)",
133 	"BIST Test\t(offline)"
134 };
135 
136 static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
137 	{"tmac_frms"},
138 	{"tmac_data_octets"},
139 	{"tmac_drop_frms"},
140 	{"tmac_mcst_frms"},
141 	{"tmac_bcst_frms"},
142 	{"tmac_pause_ctrl_frms"},
143 	{"tmac_ttl_octets"},
144 	{"tmac_ucst_frms"},
145 	{"tmac_nucst_frms"},
146 	{"tmac_any_err_frms"},
147 	{"tmac_ttl_less_fb_octets"},
148 	{"tmac_vld_ip_octets"},
149 	{"tmac_vld_ip"},
150 	{"tmac_drop_ip"},
151 	{"tmac_icmp"},
152 	{"tmac_rst_tcp"},
153 	{"tmac_tcp"},
154 	{"tmac_udp"},
155 	{"rmac_vld_frms"},
156 	{"rmac_data_octets"},
157 	{"rmac_fcs_err_frms"},
158 	{"rmac_drop_frms"},
159 	{"rmac_vld_mcst_frms"},
160 	{"rmac_vld_bcst_frms"},
161 	{"rmac_in_rng_len_err_frms"},
162 	{"rmac_out_rng_len_err_frms"},
163 	{"rmac_long_frms"},
164 	{"rmac_pause_ctrl_frms"},
165 	{"rmac_unsup_ctrl_frms"},
166 	{"rmac_ttl_octets"},
167 	{"rmac_accepted_ucst_frms"},
168 	{"rmac_accepted_nucst_frms"},
169 	{"rmac_discarded_frms"},
170 	{"rmac_drop_events"},
171 	{"rmac_ttl_less_fb_octets"},
172 	{"rmac_ttl_frms"},
173 	{"rmac_usized_frms"},
174 	{"rmac_osized_frms"},
175 	{"rmac_frag_frms"},
176 	{"rmac_jabber_frms"},
177 	{"rmac_ttl_64_frms"},
178 	{"rmac_ttl_65_127_frms"},
179 	{"rmac_ttl_128_255_frms"},
180 	{"rmac_ttl_256_511_frms"},
181 	{"rmac_ttl_512_1023_frms"},
182 	{"rmac_ttl_1024_1518_frms"},
183 	{"rmac_ip"},
184 	{"rmac_ip_octets"},
185 	{"rmac_hdr_err_ip"},
186 	{"rmac_drop_ip"},
187 	{"rmac_icmp"},
188 	{"rmac_tcp"},
189 	{"rmac_udp"},
190 	{"rmac_err_drp_udp"},
191 	{"rmac_xgmii_err_sym"},
192 	{"rmac_frms_q0"},
193 	{"rmac_frms_q1"},
194 	{"rmac_frms_q2"},
195 	{"rmac_frms_q3"},
196 	{"rmac_frms_q4"},
197 	{"rmac_frms_q5"},
198 	{"rmac_frms_q6"},
199 	{"rmac_frms_q7"},
200 	{"rmac_full_q0"},
201 	{"rmac_full_q1"},
202 	{"rmac_full_q2"},
203 	{"rmac_full_q3"},
204 	{"rmac_full_q4"},
205 	{"rmac_full_q5"},
206 	{"rmac_full_q6"},
207 	{"rmac_full_q7"},
208 	{"rmac_pause_cnt"},
209 	{"rmac_xgmii_data_err_cnt"},
210 	{"rmac_xgmii_ctrl_err_cnt"},
211 	{"rmac_accepted_ip"},
212 	{"rmac_err_tcp"},
213 	{"rd_req_cnt"},
214 	{"new_rd_req_cnt"},
215 	{"new_rd_req_rtry_cnt"},
216 	{"rd_rtry_cnt"},
217 	{"wr_rtry_rd_ack_cnt"},
218 	{"wr_req_cnt"},
219 	{"new_wr_req_cnt"},
220 	{"new_wr_req_rtry_cnt"},
221 	{"wr_rtry_cnt"},
222 	{"wr_disc_cnt"},
223 	{"rd_rtry_wr_ack_cnt"},
224 	{"txp_wr_cnt"},
225 	{"txd_rd_cnt"},
226 	{"txd_wr_cnt"},
227 	{"rxd_rd_cnt"},
228 	{"rxd_wr_cnt"},
229 	{"txf_rd_cnt"},
230 	{"rxf_wr_cnt"}
231 };
232 
233 static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
234 	{"rmac_ttl_1519_4095_frms"},
235 	{"rmac_ttl_4096_8191_frms"},
236 	{"rmac_ttl_8192_max_frms"},
237 	{"rmac_ttl_gt_max_frms"},
238 	{"rmac_osized_alt_frms"},
239 	{"rmac_jabber_alt_frms"},
240 	{"rmac_gt_max_alt_frms"},
241 	{"rmac_vlan_frms"},
242 	{"rmac_len_discard"},
243 	{"rmac_fcs_discard"},
244 	{"rmac_pf_discard"},
245 	{"rmac_da_discard"},
246 	{"rmac_red_discard"},
247 	{"rmac_rts_discard"},
248 	{"rmac_ingm_full_discard"},
249 	{"link_fault_cnt"}
250 };
251 
252 static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
253 	{"\n DRIVER STATISTICS"},
254 	{"single_bit_ecc_errs"},
255 	{"double_bit_ecc_errs"},
256 	{"parity_err_cnt"},
257 	{"serious_err_cnt"},
258 	{"soft_reset_cnt"},
259 	{"fifo_full_cnt"},
260 	{"ring_0_full_cnt"},
261 	{"ring_1_full_cnt"},
262 	{"ring_2_full_cnt"},
263 	{"ring_3_full_cnt"},
264 	{"ring_4_full_cnt"},
265 	{"ring_5_full_cnt"},
266 	{"ring_6_full_cnt"},
267 	{"ring_7_full_cnt"},
268 	{"alarm_transceiver_temp_high"},
269 	{"alarm_transceiver_temp_low"},
270 	{"alarm_laser_bias_current_high"},
271 	{"alarm_laser_bias_current_low"},
272 	{"alarm_laser_output_power_high"},
273 	{"alarm_laser_output_power_low"},
274 	{"warn_transceiver_temp_high"},
275 	{"warn_transceiver_temp_low"},
276 	{"warn_laser_bias_current_high"},
277 	{"warn_laser_bias_current_low"},
278 	{"warn_laser_output_power_high"},
279 	{"warn_laser_output_power_low"},
280 	{"lro_aggregated_pkts"},
281 	{"lro_flush_both_count"},
282 	{"lro_out_of_sequence_pkts"},
283 	{"lro_flush_due_to_max_pkts"},
284 	{"lro_avg_aggr_pkts"},
285 	{"mem_alloc_fail_cnt"},
286 	{"pci_map_fail_cnt"},
287 	{"watchdog_timer_cnt"},
288 	{"mem_allocated"},
289 	{"mem_freed"},
290 	{"link_up_cnt"},
291 	{"link_down_cnt"},
292 	{"link_up_time"},
293 	{"link_down_time"},
294 	{"tx_tcode_buf_abort_cnt"},
295 	{"tx_tcode_desc_abort_cnt"},
296 	{"tx_tcode_parity_err_cnt"},
297 	{"tx_tcode_link_loss_cnt"},
298 	{"tx_tcode_list_proc_err_cnt"},
299 	{"rx_tcode_parity_err_cnt"},
300 	{"rx_tcode_abort_cnt"},
301 	{"rx_tcode_parity_abort_cnt"},
302 	{"rx_tcode_rda_fail_cnt"},
303 	{"rx_tcode_unkn_prot_cnt"},
304 	{"rx_tcode_fcs_err_cnt"},
305 	{"rx_tcode_buf_size_err_cnt"},
306 	{"rx_tcode_rxd_corrupt_cnt"},
307 	{"rx_tcode_unkn_err_cnt"},
308 	{"tda_err_cnt"},
309 	{"pfc_err_cnt"},
310 	{"pcc_err_cnt"},
311 	{"tti_err_cnt"},
312 	{"tpa_err_cnt"},
313 	{"sm_err_cnt"},
314 	{"lso_err_cnt"},
315 	{"mac_tmac_err_cnt"},
316 	{"mac_rmac_err_cnt"},
317 	{"xgxs_txgxs_err_cnt"},
318 	{"xgxs_rxgxs_err_cnt"},
319 	{"rc_err_cnt"},
320 	{"prc_pcix_err_cnt"},
321 	{"rpa_err_cnt"},
322 	{"rda_err_cnt"},
323 	{"rti_err_cnt"},
324 	{"mc_err_cnt"}
325 };
326 
327 #define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
328 #define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
329 #define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)
330 
331 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
332 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
333 
334 #define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
335 #define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
336 
337 #define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
338 #define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
339 
340 #define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
341 	init_timer(&timer);				\
342 	timer.function = handle;			\
343 	timer.data = (unsigned long)arg;		\
344 	mod_timer(&timer, (jiffies + exp))		\
345 
346 /* copy mac addr to def_mac_addr array */
347 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
348 {
349 	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
350 	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
351 	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
352 	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
353 	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
354 	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
355 }
356 
357 /*
358  * Constants to be programmed into the Xena's registers, to configure
359  * the XAUI.
360  */
361 
362 #define	END_SIGN	0x0
363 static const u64 herc_act_dtx_cfg[] = {
364 	/* Set address */
365 	0x8000051536750000ULL, 0x80000515367500E0ULL,
366 	/* Write data */
367 	0x8000051536750004ULL, 0x80000515367500E4ULL,
368 	/* Set address */
369 	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
370 	/* Write data */
371 	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
372 	/* Set address */
373 	0x801205150D440000ULL, 0x801205150D4400E0ULL,
374 	/* Write data */
375 	0x801205150D440004ULL, 0x801205150D4400E4ULL,
376 	/* Set address */
377 	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
378 	/* Write data */
379 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
380 	/* Done */
381 	END_SIGN
382 };
383 
384 static const u64 xena_dtx_cfg[] = {
385 	/* Set address */
386 	0x8000051500000000ULL, 0x80000515000000E0ULL,
387 	/* Write data */
388 	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
389 	/* Set address */
390 	0x8001051500000000ULL, 0x80010515000000E0ULL,
391 	/* Write data */
392 	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
393 	/* Set address */
394 	0x8002051500000000ULL, 0x80020515000000E0ULL,
395 	/* Write data */
396 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
397 	END_SIGN
398 };
399 
400 /*
401  * Constants for Fixing the MacAddress problem seen mostly on
402  * Alpha machines.
403  */
404 static const u64 fix_mac[] = {
405 	0x0060000000000000ULL, 0x0060600000000000ULL,
406 	0x0040600000000000ULL, 0x0000600000000000ULL,
407 	0x0020600000000000ULL, 0x0060600000000000ULL,
408 	0x0020600000000000ULL, 0x0060600000000000ULL,
409 	0x0020600000000000ULL, 0x0060600000000000ULL,
410 	0x0020600000000000ULL, 0x0060600000000000ULL,
411 	0x0020600000000000ULL, 0x0060600000000000ULL,
412 	0x0020600000000000ULL, 0x0060600000000000ULL,
413 	0x0020600000000000ULL, 0x0060600000000000ULL,
414 	0x0020600000000000ULL, 0x0060600000000000ULL,
415 	0x0020600000000000ULL, 0x0060600000000000ULL,
416 	0x0020600000000000ULL, 0x0060600000000000ULL,
417 	0x0020600000000000ULL, 0x0000600000000000ULL,
418 	0x0040600000000000ULL, 0x0060600000000000ULL,
419 	END_SIGN
420 };
421 
422 MODULE_LICENSE("GPL");
423 MODULE_VERSION(DRV_VERSION);
424 
425 
426 /* Module Loadable parameters. */
427 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
428 S2IO_PARM_INT(rx_ring_num, 1);
429 S2IO_PARM_INT(multiq, 0);
430 S2IO_PARM_INT(rx_ring_mode, 1);
431 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
432 S2IO_PARM_INT(rmac_pause_time, 0x100);
433 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
434 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
435 S2IO_PARM_INT(shared_splits, 0);
436 S2IO_PARM_INT(tmac_util_period, 5);
437 S2IO_PARM_INT(rmac_util_period, 5);
438 S2IO_PARM_INT(l3l4hdr_size, 128);
439 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
440 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
441 /* Frequency of Rx desc syncs expressed as power of 2 */
442 S2IO_PARM_INT(rxsync_frequency, 3);
443 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
444 S2IO_PARM_INT(intr_type, 2);
445 /* Large receive offload feature */
446 
447 /* Max pkts to be aggregated by LRO at one time. If not specified,
448  * aggregation happens until we hit max IP pkt size(64K)
449  */
450 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
451 S2IO_PARM_INT(indicate_max_pkts, 0);
452 
453 S2IO_PARM_INT(napi, 1);
454 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
455 
456 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
457 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
458 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
459 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
460 static unsigned int rts_frm_len[MAX_RX_RINGS] =
461 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
462 
463 module_param_array(tx_fifo_len, uint, NULL, 0);
464 module_param_array(rx_ring_sz, uint, NULL, 0);
465 module_param_array(rts_frm_len, uint, NULL, 0);
466 
467 /*
468  * S2IO device table.
469  * This table lists all the devices that this driver supports.
470  */
471 static const struct pci_device_id s2io_tbl[] = {
472 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
473 	 PCI_ANY_ID, PCI_ANY_ID},
474 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
475 	 PCI_ANY_ID, PCI_ANY_ID},
476 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
477 	 PCI_ANY_ID, PCI_ANY_ID},
478 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
479 	 PCI_ANY_ID, PCI_ANY_ID},
480 	{0,}
481 };
482 
483 MODULE_DEVICE_TABLE(pci, s2io_tbl);
484 
485 static const struct pci_error_handlers s2io_err_handler = {
486 	.error_detected = s2io_io_error_detected,
487 	.slot_reset = s2io_io_slot_reset,
488 	.resume = s2io_io_resume,
489 };
490 
491 static struct pci_driver s2io_driver = {
492 	.name = "S2IO",
493 	.id_table = s2io_tbl,
494 	.probe = s2io_init_nic,
495 	.remove = s2io_rem_nic,
496 	.err_handler = &s2io_err_handler,
497 };
498 
499 /* A simplifier macro used both by init and free shared_mem Fns(). */
500 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
501 
502 /* netqueue manipulation helper functions */
503 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
504 {
505 	if (!sp->config.multiq) {
506 		int i;
507 
508 		for (i = 0; i < sp->config.tx_fifo_num; i++)
509 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
510 	}
511 	netif_tx_stop_all_queues(sp->dev);
512 }
513 
514 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
515 {
516 	if (!sp->config.multiq)
517 		sp->mac_control.fifos[fifo_no].queue_state =
518 			FIFO_QUEUE_STOP;
519 
520 	netif_tx_stop_all_queues(sp->dev);
521 }
522 
523 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
524 {
525 	if (!sp->config.multiq) {
526 		int i;
527 
528 		for (i = 0; i < sp->config.tx_fifo_num; i++)
529 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
530 	}
531 	netif_tx_start_all_queues(sp->dev);
532 }
533 
534 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
535 {
536 	if (!sp->config.multiq) {
537 		int i;
538 
539 		for (i = 0; i < sp->config.tx_fifo_num; i++)
540 			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
541 	}
542 	netif_tx_wake_all_queues(sp->dev);
543 }
544 
545 static inline void s2io_wake_tx_queue(
546 	struct fifo_info *fifo, int cnt, u8 multiq)
547 {
548 
549 	if (multiq) {
550 		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
551 			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
552 	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
553 		if (netif_queue_stopped(fifo->dev)) {
554 			fifo->queue_state = FIFO_QUEUE_START;
555 			netif_wake_queue(fifo->dev);
556 		}
557 	}
558 }
559 
560 /**
561  * init_shared_mem - Allocation and Initialization of Memory
562  * @nic: Device private variable.
563  * Description: The function allocates all the memory areas shared
564  * between the NIC and the driver. This includes Tx descriptors,
565  * Rx descriptors and the statistics block.
566  */
567 
568 static int init_shared_mem(struct s2io_nic *nic)
569 {
570 	u32 size;
571 	void *tmp_v_addr, *tmp_v_addr_next;
572 	dma_addr_t tmp_p_addr, tmp_p_addr_next;
573 	struct RxD_block *pre_rxd_blk = NULL;
574 	int i, j, blk_cnt;
575 	int lst_size, lst_per_page;
576 	struct net_device *dev = nic->dev;
577 	unsigned long tmp;
578 	struct buffAdd *ba;
579 	struct config_param *config = &nic->config;
580 	struct mac_info *mac_control = &nic->mac_control;
581 	unsigned long long mem_allocated = 0;
582 
583 	/* Allocation and initialization of TXDLs in FIFOs */
584 	size = 0;
585 	for (i = 0; i < config->tx_fifo_num; i++) {
586 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
587 
588 		size += tx_cfg->fifo_len;
589 	}
590 	if (size > MAX_AVAILABLE_TXDS) {
591 		DBG_PRINT(ERR_DBG,
592 			  "Too many TxDs requested: %d, max supported: %d\n",
593 			  size, MAX_AVAILABLE_TXDS);
594 		return -EINVAL;
595 	}
596 
597 	size = 0;
598 	for (i = 0; i < config->tx_fifo_num; i++) {
599 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
600 
601 		size = tx_cfg->fifo_len;
602 		/*
603 		 * Legal values are from 2 to 8192
604 		 */
605 		if (size < 2) {
606 			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
607 				  "Valid lengths are 2 through 8192\n",
608 				  i, size);
609 			return -EINVAL;
610 		}
611 	}
612 
613 	lst_size = (sizeof(struct TxD) * config->max_txds);
614 	lst_per_page = PAGE_SIZE / lst_size;
615 
616 	for (i = 0; i < config->tx_fifo_num; i++) {
617 		struct fifo_info *fifo = &mac_control->fifos[i];
618 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
619 		int fifo_len = tx_cfg->fifo_len;
620 		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
621 
622 		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
623 		if (!fifo->list_info) {
624 			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
625 			return -ENOMEM;
626 		}
627 		mem_allocated += list_holder_size;
628 	}
629 	for (i = 0; i < config->tx_fifo_num; i++) {
630 		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
631 						lst_per_page);
632 		struct fifo_info *fifo = &mac_control->fifos[i];
633 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
634 
635 		fifo->tx_curr_put_info.offset = 0;
636 		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
637 		fifo->tx_curr_get_info.offset = 0;
638 		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
639 		fifo->fifo_no = i;
640 		fifo->nic = nic;
641 		fifo->max_txds = MAX_SKB_FRAGS + 2;
642 		fifo->dev = dev;
643 
644 		for (j = 0; j < page_num; j++) {
645 			int k = 0;
646 			dma_addr_t tmp_p;
647 			void *tmp_v;
648 			tmp_v = pci_alloc_consistent(nic->pdev,
649 						     PAGE_SIZE, &tmp_p);
650 			if (!tmp_v) {
651 				DBG_PRINT(INFO_DBG,
652 					  "pci_alloc_consistent failed for TxDL\n");
653 				return -ENOMEM;
654 			}
655 			/* If we got a zero DMA address(can happen on
656 			 * certain platforms like PPC), reallocate.
657 			 * Store virtual address of page we don't want,
658 			 * to be freed later.
659 			 */
660 			if (!tmp_p) {
661 				mac_control->zerodma_virt_addr = tmp_v;
662 				DBG_PRINT(INIT_DBG,
663 					  "%s: Zero DMA address for TxDL. "
664 					  "Virtual address %p\n",
665 					  dev->name, tmp_v);
666 				tmp_v = pci_alloc_consistent(nic->pdev,
667 							     PAGE_SIZE, &tmp_p);
668 				if (!tmp_v) {
669 					DBG_PRINT(INFO_DBG,
670 						  "pci_alloc_consistent failed for TxDL\n");
671 					return -ENOMEM;
672 				}
673 				mem_allocated += PAGE_SIZE;
674 			}
675 			while (k < lst_per_page) {
676 				int l = (j * lst_per_page) + k;
677 				if (l == tx_cfg->fifo_len)
678 					break;
679 				fifo->list_info[l].list_virt_addr =
680 					tmp_v + (k * lst_size);
681 				fifo->list_info[l].list_phy_addr =
682 					tmp_p + (k * lst_size);
683 				k++;
684 			}
685 		}
686 	}
687 
688 	for (i = 0; i < config->tx_fifo_num; i++) {
689 		struct fifo_info *fifo = &mac_control->fifos[i];
690 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
691 
692 		size = tx_cfg->fifo_len;
693 		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
694 		if (!fifo->ufo_in_band_v)
695 			return -ENOMEM;
696 		mem_allocated += (size * sizeof(u64));
697 	}
698 
699 	/* Allocation and initialization of RXDs in Rings */
700 	size = 0;
701 	for (i = 0; i < config->rx_ring_num; i++) {
702 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
703 		struct ring_info *ring = &mac_control->rings[i];
704 
705 		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
706 			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
707 				  "multiple of RxDs per Block\n",
708 				  dev->name, i);
709 			return FAILURE;
710 		}
711 		size += rx_cfg->num_rxd;
712 		ring->block_count = rx_cfg->num_rxd /
713 			(rxd_count[nic->rxd_mode] + 1);
714 		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
715 	}
716 	if (nic->rxd_mode == RXD_MODE_1)
717 		size = (size * (sizeof(struct RxD1)));
718 	else
719 		size = (size * (sizeof(struct RxD3)));
720 
721 	for (i = 0; i < config->rx_ring_num; i++) {
722 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
723 		struct ring_info *ring = &mac_control->rings[i];
724 
725 		ring->rx_curr_get_info.block_index = 0;
726 		ring->rx_curr_get_info.offset = 0;
727 		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
728 		ring->rx_curr_put_info.block_index = 0;
729 		ring->rx_curr_put_info.offset = 0;
730 		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
731 		ring->nic = nic;
732 		ring->ring_no = i;
733 
734 		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
735 		/*  Allocating all the Rx blocks */
736 		for (j = 0; j < blk_cnt; j++) {
737 			struct rx_block_info *rx_blocks;
738 			int l;
739 
740 			rx_blocks = &ring->rx_blocks[j];
741 			size = SIZE_OF_BLOCK;	/* size is always page size */
742 			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
743 							  &tmp_p_addr);
744 			if (tmp_v_addr == NULL) {
745 				/*
746 				 * In case of failure, free_shared_mem()
747 				 * is called, which should free any
748 				 * memory that was alloced till the
749 				 * failure happened.
750 				 */
751 				rx_blocks->block_virt_addr = tmp_v_addr;
752 				return -ENOMEM;
753 			}
754 			mem_allocated += size;
755 			memset(tmp_v_addr, 0, size);
756 
757 			size = sizeof(struct rxd_info) *
758 				rxd_count[nic->rxd_mode];
759 			rx_blocks->block_virt_addr = tmp_v_addr;
760 			rx_blocks->block_dma_addr = tmp_p_addr;
761 			rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
762 			if (!rx_blocks->rxds)
763 				return -ENOMEM;
764 			mem_allocated += size;
765 			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
766 				rx_blocks->rxds[l].virt_addr =
767 					rx_blocks->block_virt_addr +
768 					(rxd_size[nic->rxd_mode] * l);
769 				rx_blocks->rxds[l].dma_addr =
770 					rx_blocks->block_dma_addr +
771 					(rxd_size[nic->rxd_mode] * l);
772 			}
773 		}
774 		/* Interlinking all Rx Blocks */
775 		for (j = 0; j < blk_cnt; j++) {
776 			int next = (j + 1) % blk_cnt;
777 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
778 			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
779 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
780 			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
781 
782 			pre_rxd_blk = tmp_v_addr;
783 			pre_rxd_blk->reserved_2_pNext_RxD_block =
784 				(unsigned long)tmp_v_addr_next;
785 			pre_rxd_blk->pNext_RxD_Blk_physical =
786 				(u64)tmp_p_addr_next;
787 		}
788 	}
789 	if (nic->rxd_mode == RXD_MODE_3B) {
790 		/*
791 		 * Allocation of Storages for buffer addresses in 2BUFF mode
792 		 * and the buffers as well.
793 		 */
794 		for (i = 0; i < config->rx_ring_num; i++) {
795 			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
796 			struct ring_info *ring = &mac_control->rings[i];
797 
798 			blk_cnt = rx_cfg->num_rxd /
799 				(rxd_count[nic->rxd_mode] + 1);
800 			size = sizeof(struct buffAdd *) * blk_cnt;
801 			ring->ba = kmalloc(size, GFP_KERNEL);
802 			if (!ring->ba)
803 				return -ENOMEM;
804 			mem_allocated += size;
805 			for (j = 0; j < blk_cnt; j++) {
806 				int k = 0;
807 
808 				size = sizeof(struct buffAdd) *
809 					(rxd_count[nic->rxd_mode] + 1);
810 				ring->ba[j] = kmalloc(size, GFP_KERNEL);
811 				if (!ring->ba[j])
812 					return -ENOMEM;
813 				mem_allocated += size;
814 				while (k != rxd_count[nic->rxd_mode]) {
815 					ba = &ring->ba[j][k];
816 					size = BUF0_LEN + ALIGN_SIZE;
817 					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
818 					if (!ba->ba_0_org)
819 						return -ENOMEM;
820 					mem_allocated += size;
821 					tmp = (unsigned long)ba->ba_0_org;
822 					tmp += ALIGN_SIZE;
823 					tmp &= ~((unsigned long)ALIGN_SIZE);
824 					ba->ba_0 = (void *)tmp;
825 
826 					size = BUF1_LEN + ALIGN_SIZE;
827 					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
828 					if (!ba->ba_1_org)
829 						return -ENOMEM;
830 					mem_allocated += size;
831 					tmp = (unsigned long)ba->ba_1_org;
832 					tmp += ALIGN_SIZE;
833 					tmp &= ~((unsigned long)ALIGN_SIZE);
834 					ba->ba_1 = (void *)tmp;
835 					k++;
836 				}
837 			}
838 		}
839 	}
840 
841 	/* Allocation and initialization of Statistics block */
842 	size = sizeof(struct stat_block);
843 	mac_control->stats_mem =
844 		pci_alloc_consistent(nic->pdev, size,
845 				     &mac_control->stats_mem_phy);
846 
847 	if (!mac_control->stats_mem) {
848 		/*
849 		 * In case of failure, free_shared_mem() is called, which
850 		 * should free any memory that was alloced till the
851 		 * failure happened.
852 		 */
853 		return -ENOMEM;
854 	}
855 	mem_allocated += size;
856 	mac_control->stats_mem_sz = size;
857 
858 	tmp_v_addr = mac_control->stats_mem;
859 	mac_control->stats_info = tmp_v_addr;
860 	memset(tmp_v_addr, 0, size);
861 	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
862 		dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
863 	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
864 	return SUCCESS;
865 }
866 
867 /**
868  * free_shared_mem - Free the allocated Memory
869  * @nic:  Device private variable.
870  * Description: This function is to free all memory locations allocated by
871  * the init_shared_mem() function and return it to the kernel.
872  */
873 
874 static void free_shared_mem(struct s2io_nic *nic)
875 {
876 	int i, j, blk_cnt, size;
877 	void *tmp_v_addr;
878 	dma_addr_t tmp_p_addr;
879 	int lst_size, lst_per_page;
880 	struct net_device *dev;
881 	int page_num = 0;
882 	struct config_param *config;
883 	struct mac_info *mac_control;
884 	struct stat_block *stats;
885 	struct swStat *swstats;
886 
887 	if (!nic)
888 		return;
889 
890 	dev = nic->dev;
891 
892 	config = &nic->config;
893 	mac_control = &nic->mac_control;
894 	stats = mac_control->stats_info;
895 	swstats = &stats->sw_stat;
896 
897 	lst_size = sizeof(struct TxD) * config->max_txds;
898 	lst_per_page = PAGE_SIZE / lst_size;
899 
900 	for (i = 0; i < config->tx_fifo_num; i++) {
901 		struct fifo_info *fifo = &mac_control->fifos[i];
902 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
903 
904 		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
905 		for (j = 0; j < page_num; j++) {
906 			int mem_blks = (j * lst_per_page);
907 			struct list_info_hold *fli;
908 
909 			if (!fifo->list_info)
910 				return;
911 
912 			fli = &fifo->list_info[mem_blks];
913 			if (!fli->list_virt_addr)
914 				break;
915 			pci_free_consistent(nic->pdev, PAGE_SIZE,
916 					    fli->list_virt_addr,
917 					    fli->list_phy_addr);
918 			swstats->mem_freed += PAGE_SIZE;
919 		}
920 		/* If we got a zero DMA address during allocation,
921 		 * free the page now
922 		 */
923 		if (mac_control->zerodma_virt_addr) {
924 			pci_free_consistent(nic->pdev, PAGE_SIZE,
925 					    mac_control->zerodma_virt_addr,
926 					    (dma_addr_t)0);
927 			DBG_PRINT(INIT_DBG,
928 				  "%s: Freeing TxDL with zero DMA address. "
929 				  "Virtual address %p\n",
930 				  dev->name, mac_control->zerodma_virt_addr);
931 			swstats->mem_freed += PAGE_SIZE;
932 		}
933 		kfree(fifo->list_info);
934 		swstats->mem_freed += tx_cfg->fifo_len *
935 			sizeof(struct list_info_hold);
936 	}
937 
938 	size = SIZE_OF_BLOCK;
939 	for (i = 0; i < config->rx_ring_num; i++) {
940 		struct ring_info *ring = &mac_control->rings[i];
941 
942 		blk_cnt = ring->block_count;
943 		for (j = 0; j < blk_cnt; j++) {
944 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
945 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
946 			if (tmp_v_addr == NULL)
947 				break;
948 			pci_free_consistent(nic->pdev, size,
949 					    tmp_v_addr, tmp_p_addr);
950 			swstats->mem_freed += size;
951 			kfree(ring->rx_blocks[j].rxds);
952 			swstats->mem_freed += sizeof(struct rxd_info) *
953 				rxd_count[nic->rxd_mode];
954 		}
955 	}
956 
957 	if (nic->rxd_mode == RXD_MODE_3B) {
958 		/* Freeing buffer storage addresses in 2BUFF mode. */
959 		for (i = 0; i < config->rx_ring_num; i++) {
960 			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
961 			struct ring_info *ring = &mac_control->rings[i];
962 
963 			blk_cnt = rx_cfg->num_rxd /
964 				(rxd_count[nic->rxd_mode] + 1);
965 			for (j = 0; j < blk_cnt; j++) {
966 				int k = 0;
967 				if (!ring->ba[j])
968 					continue;
969 				while (k != rxd_count[nic->rxd_mode]) {
970 					struct buffAdd *ba = &ring->ba[j][k];
971 					kfree(ba->ba_0_org);
972 					swstats->mem_freed +=
973 						BUF0_LEN + ALIGN_SIZE;
974 					kfree(ba->ba_1_org);
975 					swstats->mem_freed +=
976 						BUF1_LEN + ALIGN_SIZE;
977 					k++;
978 				}
979 				kfree(ring->ba[j]);
980 				swstats->mem_freed += sizeof(struct buffAdd) *
981 					(rxd_count[nic->rxd_mode] + 1);
982 			}
983 			kfree(ring->ba);
984 			swstats->mem_freed += sizeof(struct buffAdd *) *
985 				blk_cnt;
986 		}
987 	}
988 
989 	for (i = 0; i < nic->config.tx_fifo_num; i++) {
990 		struct fifo_info *fifo = &mac_control->fifos[i];
991 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
992 
993 		if (fifo->ufo_in_band_v) {
994 			swstats->mem_freed += tx_cfg->fifo_len *
995 				sizeof(u64);
996 			kfree(fifo->ufo_in_band_v);
997 		}
998 	}
999 
1000 	if (mac_control->stats_mem) {
1001 		swstats->mem_freed += mac_control->stats_mem_sz;
1002 		pci_free_consistent(nic->pdev,
1003 				    mac_control->stats_mem_sz,
1004 				    mac_control->stats_mem,
1005 				    mac_control->stats_mem_phy);
1006 	}
1007 }
1008 
1009 /**
1010  * s2io_verify_pci_mode -
1011  */
1012 
1013 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1014 {
1015 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1016 	register u64 val64 = 0;
1017 	int     mode;
1018 
1019 	val64 = readq(&bar0->pci_mode);
1020 	mode = (u8)GET_PCI_MODE(val64);
1021 
1022 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1023 		return -1;      /* Unknown PCI mode */
1024 	return mode;
1025 }
1026 
1027 #define NEC_VENID   0x1033
1028 #define NEC_DEVID   0x0125
1029 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1030 {
1031 	struct pci_dev *tdev = NULL;
1032 	for_each_pci_dev(tdev) {
1033 		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1034 			if (tdev->bus == s2io_pdev->bus->parent) {
1035 				pci_dev_put(tdev);
1036 				return 1;
1037 			}
1038 		}
1039 	}
1040 	return 0;
1041 }
1042 
1043 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1044 /**
1045  * s2io_print_pci_mode -
1046  */
1047 static int s2io_print_pci_mode(struct s2io_nic *nic)
1048 {
1049 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1050 	register u64 val64 = 0;
1051 	int	mode;
1052 	struct config_param *config = &nic->config;
1053 	const char *pcimode;
1054 
1055 	val64 = readq(&bar0->pci_mode);
1056 	mode = (u8)GET_PCI_MODE(val64);
1057 
1058 	if (val64 & PCI_MODE_UNKNOWN_MODE)
1059 		return -1;	/* Unknown PCI mode */
1060 
1061 	config->bus_speed = bus_speed[mode];
1062 
1063 	if (s2io_on_nec_bridge(nic->pdev)) {
1064 		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1065 			  nic->dev->name);
1066 		return mode;
1067 	}
1068 
1069 	switch (mode) {
1070 	case PCI_MODE_PCI_33:
1071 		pcimode = "33MHz PCI bus";
1072 		break;
1073 	case PCI_MODE_PCI_66:
1074 		pcimode = "66MHz PCI bus";
1075 		break;
1076 	case PCI_MODE_PCIX_M1_66:
1077 		pcimode = "66MHz PCIX(M1) bus";
1078 		break;
1079 	case PCI_MODE_PCIX_M1_100:
1080 		pcimode = "100MHz PCIX(M1) bus";
1081 		break;
1082 	case PCI_MODE_PCIX_M1_133:
1083 		pcimode = "133MHz PCIX(M1) bus";
1084 		break;
1085 	case PCI_MODE_PCIX_M2_66:
1086 		pcimode = "133MHz PCIX(M2) bus";
1087 		break;
1088 	case PCI_MODE_PCIX_M2_100:
1089 		pcimode = "200MHz PCIX(M2) bus";
1090 		break;
1091 	case PCI_MODE_PCIX_M2_133:
1092 		pcimode = "266MHz PCIX(M2) bus";
1093 		break;
1094 	default:
1095 		pcimode = "unsupported bus!";
1096 		mode = -1;
1097 	}
1098 
1099 	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1100 		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1101 
1102 	return mode;
1103 }
1104 
1105 /**
1106  *  init_tti - Initialization transmit traffic interrupt scheme
1107  *  @nic: device private variable
1108  *  @link: link status (UP/DOWN) used to enable/disable continuous
1109  *  transmit interrupts
1110  *  Description: The function configures transmit traffic interrupts
1111  *  Return Value:  SUCCESS on success and
1112  *  '-1' on failure
1113  */
1114 
1115 static int init_tti(struct s2io_nic *nic, int link)
1116 {
1117 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1118 	register u64 val64 = 0;
1119 	int i;
1120 	struct config_param *config = &nic->config;
1121 
1122 	for (i = 0; i < config->tx_fifo_num; i++) {
1123 		/*
1124 		 * TTI Initialization. Default Tx timer gets us about
1125 		 * 250 interrupts per sec. Continuous interrupts are enabled
1126 		 * by default.
1127 		 */
1128 		if (nic->device_type == XFRAME_II_DEVICE) {
1129 			int count = (nic->config.bus_speed * 125)/2;
1130 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1131 		} else
1132 			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1133 
1134 		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1135 			TTI_DATA1_MEM_TX_URNG_B(0x10) |
1136 			TTI_DATA1_MEM_TX_URNG_C(0x30) |
1137 			TTI_DATA1_MEM_TX_TIMER_AC_EN;
1138 		if (i == 0)
1139 			if (use_continuous_tx_intrs && (link == LINK_UP))
1140 				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1141 		writeq(val64, &bar0->tti_data1_mem);
1142 
1143 		if (nic->config.intr_type == MSI_X) {
1144 			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1145 				TTI_DATA2_MEM_TX_UFC_B(0x100) |
1146 				TTI_DATA2_MEM_TX_UFC_C(0x200) |
1147 				TTI_DATA2_MEM_TX_UFC_D(0x300);
1148 		} else {
1149 			if ((nic->config.tx_steering_type ==
1150 			     TX_DEFAULT_STEERING) &&
1151 			    (config->tx_fifo_num > 1) &&
1152 			    (i >= nic->udp_fifo_idx) &&
1153 			    (i < (nic->udp_fifo_idx +
1154 				  nic->total_udp_fifos)))
1155 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1156 					TTI_DATA2_MEM_TX_UFC_B(0x80) |
1157 					TTI_DATA2_MEM_TX_UFC_C(0x100) |
1158 					TTI_DATA2_MEM_TX_UFC_D(0x120);
1159 			else
1160 				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1161 					TTI_DATA2_MEM_TX_UFC_B(0x20) |
1162 					TTI_DATA2_MEM_TX_UFC_C(0x40) |
1163 					TTI_DATA2_MEM_TX_UFC_D(0x80);
1164 		}
1165 
1166 		writeq(val64, &bar0->tti_data2_mem);
1167 
1168 		val64 = TTI_CMD_MEM_WE |
1169 			TTI_CMD_MEM_STROBE_NEW_CMD |
1170 			TTI_CMD_MEM_OFFSET(i);
1171 		writeq(val64, &bar0->tti_command_mem);
1172 
1173 		if (wait_for_cmd_complete(&bar0->tti_command_mem,
1174 					  TTI_CMD_MEM_STROBE_NEW_CMD,
1175 					  S2IO_BIT_RESET) != SUCCESS)
1176 			return FAILURE;
1177 	}
1178 
1179 	return SUCCESS;
1180 }
1181 
1182 /**
1183  *  init_nic - Initialization of hardware
1184  *  @nic: device private variable
1185  *  Description: The function sequentially configures every block
1186  *  of the H/W from their reset values.
1187  *  Return Value:  SUCCESS on success and
1188  *  '-1' on failure (endian settings incorrect).
1189  */
1190 
1191 static int init_nic(struct s2io_nic *nic)
1192 {
1193 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1194 	struct net_device *dev = nic->dev;
1195 	register u64 val64 = 0;
1196 	void __iomem *add;
1197 	u32 time;
1198 	int i, j;
1199 	int dtx_cnt = 0;
1200 	unsigned long long mem_share;
1201 	int mem_size;
1202 	struct config_param *config = &nic->config;
1203 	struct mac_info *mac_control = &nic->mac_control;
1204 
1205 	/* to set the swapper controle on the card */
1206 	if (s2io_set_swapper(nic)) {
1207 		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1208 		return -EIO;
1209 	}
1210 
1211 	/*
1212 	 * Herc requires EOI to be removed from reset before XGXS, so..
1213 	 */
1214 	if (nic->device_type & XFRAME_II_DEVICE) {
1215 		val64 = 0xA500000000ULL;
1216 		writeq(val64, &bar0->sw_reset);
1217 		msleep(500);
1218 		val64 = readq(&bar0->sw_reset);
1219 	}
1220 
1221 	/* Remove XGXS from reset state */
1222 	val64 = 0;
1223 	writeq(val64, &bar0->sw_reset);
1224 	msleep(500);
1225 	val64 = readq(&bar0->sw_reset);
1226 
1227 	/* Ensure that it's safe to access registers by checking
1228 	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1229 	 */
1230 	if (nic->device_type == XFRAME_II_DEVICE) {
1231 		for (i = 0; i < 50; i++) {
1232 			val64 = readq(&bar0->adapter_status);
1233 			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1234 				break;
1235 			msleep(10);
1236 		}
1237 		if (i == 50)
1238 			return -ENODEV;
1239 	}
1240 
1241 	/*  Enable Receiving broadcasts */
1242 	add = &bar0->mac_cfg;
1243 	val64 = readq(&bar0->mac_cfg);
1244 	val64 |= MAC_RMAC_BCAST_ENABLE;
1245 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1246 	writel((u32)val64, add);
1247 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1248 	writel((u32) (val64 >> 32), (add + 4));
1249 
1250 	/* Read registers in all blocks */
1251 	val64 = readq(&bar0->mac_int_mask);
1252 	val64 = readq(&bar0->mc_int_mask);
1253 	val64 = readq(&bar0->xgxs_int_mask);
1254 
1255 	/*  Set MTU */
1256 	val64 = dev->mtu;
1257 	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1258 
1259 	if (nic->device_type & XFRAME_II_DEVICE) {
1260 		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1261 			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1262 					  &bar0->dtx_control, UF);
1263 			if (dtx_cnt & 0x1)
1264 				msleep(1); /* Necessary!! */
1265 			dtx_cnt++;
1266 		}
1267 	} else {
1268 		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1269 			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1270 					  &bar0->dtx_control, UF);
1271 			val64 = readq(&bar0->dtx_control);
1272 			dtx_cnt++;
1273 		}
1274 	}
1275 
1276 	/*  Tx DMA Initialization */
1277 	val64 = 0;
1278 	writeq(val64, &bar0->tx_fifo_partition_0);
1279 	writeq(val64, &bar0->tx_fifo_partition_1);
1280 	writeq(val64, &bar0->tx_fifo_partition_2);
1281 	writeq(val64, &bar0->tx_fifo_partition_3);
1282 
1283 	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1284 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1285 
1286 		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1287 			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1288 
1289 		if (i == (config->tx_fifo_num - 1)) {
1290 			if (i % 2 == 0)
1291 				i++;
1292 		}
1293 
1294 		switch (i) {
1295 		case 1:
1296 			writeq(val64, &bar0->tx_fifo_partition_0);
1297 			val64 = 0;
1298 			j = 0;
1299 			break;
1300 		case 3:
1301 			writeq(val64, &bar0->tx_fifo_partition_1);
1302 			val64 = 0;
1303 			j = 0;
1304 			break;
1305 		case 5:
1306 			writeq(val64, &bar0->tx_fifo_partition_2);
1307 			val64 = 0;
1308 			j = 0;
1309 			break;
1310 		case 7:
1311 			writeq(val64, &bar0->tx_fifo_partition_3);
1312 			val64 = 0;
1313 			j = 0;
1314 			break;
1315 		default:
1316 			j++;
1317 			break;
1318 		}
1319 	}
1320 
1321 	/*
1322 	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1323 	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1324 	 */
1325 	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1326 		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1327 
1328 	val64 = readq(&bar0->tx_fifo_partition_0);
1329 	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1330 		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1331 
1332 	/*
1333 	 * Initialization of Tx_PA_CONFIG register to ignore packet
1334 	 * integrity checking.
1335 	 */
1336 	val64 = readq(&bar0->tx_pa_cfg);
1337 	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1338 		TX_PA_CFG_IGNORE_SNAP_OUI |
1339 		TX_PA_CFG_IGNORE_LLC_CTRL |
1340 		TX_PA_CFG_IGNORE_L2_ERR;
1341 	writeq(val64, &bar0->tx_pa_cfg);
1342 
1343 	/* Rx DMA initialization. */
1344 	val64 = 0;
1345 	for (i = 0; i < config->rx_ring_num; i++) {
1346 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1347 
1348 		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1349 	}
1350 	writeq(val64, &bar0->rx_queue_priority);
1351 
1352 	/*
1353 	 * Allocating equal share of memory to all the
1354 	 * configured Rings.
1355 	 */
1356 	val64 = 0;
1357 	if (nic->device_type & XFRAME_II_DEVICE)
1358 		mem_size = 32;
1359 	else
1360 		mem_size = 64;
1361 
1362 	for (i = 0; i < config->rx_ring_num; i++) {
1363 		switch (i) {
1364 		case 0:
1365 			mem_share = (mem_size / config->rx_ring_num +
1366 				     mem_size % config->rx_ring_num);
1367 			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1368 			continue;
1369 		case 1:
1370 			mem_share = (mem_size / config->rx_ring_num);
1371 			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1372 			continue;
1373 		case 2:
1374 			mem_share = (mem_size / config->rx_ring_num);
1375 			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1376 			continue;
1377 		case 3:
1378 			mem_share = (mem_size / config->rx_ring_num);
1379 			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1380 			continue;
1381 		case 4:
1382 			mem_share = (mem_size / config->rx_ring_num);
1383 			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1384 			continue;
1385 		case 5:
1386 			mem_share = (mem_size / config->rx_ring_num);
1387 			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1388 			continue;
1389 		case 6:
1390 			mem_share = (mem_size / config->rx_ring_num);
1391 			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1392 			continue;
1393 		case 7:
1394 			mem_share = (mem_size / config->rx_ring_num);
1395 			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1396 			continue;
1397 		}
1398 	}
1399 	writeq(val64, &bar0->rx_queue_cfg);
1400 
1401 	/*
1402 	 * Filling Tx round robin registers
1403 	 * as per the number of FIFOs for equal scheduling priority
1404 	 */
1405 	switch (config->tx_fifo_num) {
1406 	case 1:
1407 		val64 = 0x0;
1408 		writeq(val64, &bar0->tx_w_round_robin_0);
1409 		writeq(val64, &bar0->tx_w_round_robin_1);
1410 		writeq(val64, &bar0->tx_w_round_robin_2);
1411 		writeq(val64, &bar0->tx_w_round_robin_3);
1412 		writeq(val64, &bar0->tx_w_round_robin_4);
1413 		break;
1414 	case 2:
1415 		val64 = 0x0001000100010001ULL;
1416 		writeq(val64, &bar0->tx_w_round_robin_0);
1417 		writeq(val64, &bar0->tx_w_round_robin_1);
1418 		writeq(val64, &bar0->tx_w_round_robin_2);
1419 		writeq(val64, &bar0->tx_w_round_robin_3);
1420 		val64 = 0x0001000100000000ULL;
1421 		writeq(val64, &bar0->tx_w_round_robin_4);
1422 		break;
1423 	case 3:
1424 		val64 = 0x0001020001020001ULL;
1425 		writeq(val64, &bar0->tx_w_round_robin_0);
1426 		val64 = 0x0200010200010200ULL;
1427 		writeq(val64, &bar0->tx_w_round_robin_1);
1428 		val64 = 0x0102000102000102ULL;
1429 		writeq(val64, &bar0->tx_w_round_robin_2);
1430 		val64 = 0x0001020001020001ULL;
1431 		writeq(val64, &bar0->tx_w_round_robin_3);
1432 		val64 = 0x0200010200000000ULL;
1433 		writeq(val64, &bar0->tx_w_round_robin_4);
1434 		break;
1435 	case 4:
1436 		val64 = 0x0001020300010203ULL;
1437 		writeq(val64, &bar0->tx_w_round_robin_0);
1438 		writeq(val64, &bar0->tx_w_round_robin_1);
1439 		writeq(val64, &bar0->tx_w_round_robin_2);
1440 		writeq(val64, &bar0->tx_w_round_robin_3);
1441 		val64 = 0x0001020300000000ULL;
1442 		writeq(val64, &bar0->tx_w_round_robin_4);
1443 		break;
1444 	case 5:
1445 		val64 = 0x0001020304000102ULL;
1446 		writeq(val64, &bar0->tx_w_round_robin_0);
1447 		val64 = 0x0304000102030400ULL;
1448 		writeq(val64, &bar0->tx_w_round_robin_1);
1449 		val64 = 0x0102030400010203ULL;
1450 		writeq(val64, &bar0->tx_w_round_robin_2);
1451 		val64 = 0x0400010203040001ULL;
1452 		writeq(val64, &bar0->tx_w_round_robin_3);
1453 		val64 = 0x0203040000000000ULL;
1454 		writeq(val64, &bar0->tx_w_round_robin_4);
1455 		break;
1456 	case 6:
1457 		val64 = 0x0001020304050001ULL;
1458 		writeq(val64, &bar0->tx_w_round_robin_0);
1459 		val64 = 0x0203040500010203ULL;
1460 		writeq(val64, &bar0->tx_w_round_robin_1);
1461 		val64 = 0x0405000102030405ULL;
1462 		writeq(val64, &bar0->tx_w_round_robin_2);
1463 		val64 = 0x0001020304050001ULL;
1464 		writeq(val64, &bar0->tx_w_round_robin_3);
1465 		val64 = 0x0203040500000000ULL;
1466 		writeq(val64, &bar0->tx_w_round_robin_4);
1467 		break;
1468 	case 7:
1469 		val64 = 0x0001020304050600ULL;
1470 		writeq(val64, &bar0->tx_w_round_robin_0);
1471 		val64 = 0x0102030405060001ULL;
1472 		writeq(val64, &bar0->tx_w_round_robin_1);
1473 		val64 = 0x0203040506000102ULL;
1474 		writeq(val64, &bar0->tx_w_round_robin_2);
1475 		val64 = 0x0304050600010203ULL;
1476 		writeq(val64, &bar0->tx_w_round_robin_3);
1477 		val64 = 0x0405060000000000ULL;
1478 		writeq(val64, &bar0->tx_w_round_robin_4);
1479 		break;
1480 	case 8:
1481 		val64 = 0x0001020304050607ULL;
1482 		writeq(val64, &bar0->tx_w_round_robin_0);
1483 		writeq(val64, &bar0->tx_w_round_robin_1);
1484 		writeq(val64, &bar0->tx_w_round_robin_2);
1485 		writeq(val64, &bar0->tx_w_round_robin_3);
1486 		val64 = 0x0001020300000000ULL;
1487 		writeq(val64, &bar0->tx_w_round_robin_4);
1488 		break;
1489 	}
1490 
1491 	/* Enable all configured Tx FIFO partitions */
1492 	val64 = readq(&bar0->tx_fifo_partition_0);
1493 	val64 |= (TX_FIFO_PARTITION_EN);
1494 	writeq(val64, &bar0->tx_fifo_partition_0);
1495 
1496 	/* Filling the Rx round robin registers as per the
1497 	 * number of Rings and steering based on QoS with
1498 	 * equal priority.
1499 	 */
1500 	switch (config->rx_ring_num) {
1501 	case 1:
1502 		val64 = 0x0;
1503 		writeq(val64, &bar0->rx_w_round_robin_0);
1504 		writeq(val64, &bar0->rx_w_round_robin_1);
1505 		writeq(val64, &bar0->rx_w_round_robin_2);
1506 		writeq(val64, &bar0->rx_w_round_robin_3);
1507 		writeq(val64, &bar0->rx_w_round_robin_4);
1508 
1509 		val64 = 0x8080808080808080ULL;
1510 		writeq(val64, &bar0->rts_qos_steering);
1511 		break;
1512 	case 2:
1513 		val64 = 0x0001000100010001ULL;
1514 		writeq(val64, &bar0->rx_w_round_robin_0);
1515 		writeq(val64, &bar0->rx_w_round_robin_1);
1516 		writeq(val64, &bar0->rx_w_round_robin_2);
1517 		writeq(val64, &bar0->rx_w_round_robin_3);
1518 		val64 = 0x0001000100000000ULL;
1519 		writeq(val64, &bar0->rx_w_round_robin_4);
1520 
1521 		val64 = 0x8080808040404040ULL;
1522 		writeq(val64, &bar0->rts_qos_steering);
1523 		break;
1524 	case 3:
1525 		val64 = 0x0001020001020001ULL;
1526 		writeq(val64, &bar0->rx_w_round_robin_0);
1527 		val64 = 0x0200010200010200ULL;
1528 		writeq(val64, &bar0->rx_w_round_robin_1);
1529 		val64 = 0x0102000102000102ULL;
1530 		writeq(val64, &bar0->rx_w_round_robin_2);
1531 		val64 = 0x0001020001020001ULL;
1532 		writeq(val64, &bar0->rx_w_round_robin_3);
1533 		val64 = 0x0200010200000000ULL;
1534 		writeq(val64, &bar0->rx_w_round_robin_4);
1535 
1536 		val64 = 0x8080804040402020ULL;
1537 		writeq(val64, &bar0->rts_qos_steering);
1538 		break;
1539 	case 4:
1540 		val64 = 0x0001020300010203ULL;
1541 		writeq(val64, &bar0->rx_w_round_robin_0);
1542 		writeq(val64, &bar0->rx_w_round_robin_1);
1543 		writeq(val64, &bar0->rx_w_round_robin_2);
1544 		writeq(val64, &bar0->rx_w_round_robin_3);
1545 		val64 = 0x0001020300000000ULL;
1546 		writeq(val64, &bar0->rx_w_round_robin_4);
1547 
1548 		val64 = 0x8080404020201010ULL;
1549 		writeq(val64, &bar0->rts_qos_steering);
1550 		break;
1551 	case 5:
1552 		val64 = 0x0001020304000102ULL;
1553 		writeq(val64, &bar0->rx_w_round_robin_0);
1554 		val64 = 0x0304000102030400ULL;
1555 		writeq(val64, &bar0->rx_w_round_robin_1);
1556 		val64 = 0x0102030400010203ULL;
1557 		writeq(val64, &bar0->rx_w_round_robin_2);
1558 		val64 = 0x0400010203040001ULL;
1559 		writeq(val64, &bar0->rx_w_round_robin_3);
1560 		val64 = 0x0203040000000000ULL;
1561 		writeq(val64, &bar0->rx_w_round_robin_4);
1562 
1563 		val64 = 0x8080404020201008ULL;
1564 		writeq(val64, &bar0->rts_qos_steering);
1565 		break;
1566 	case 6:
1567 		val64 = 0x0001020304050001ULL;
1568 		writeq(val64, &bar0->rx_w_round_robin_0);
1569 		val64 = 0x0203040500010203ULL;
1570 		writeq(val64, &bar0->rx_w_round_robin_1);
1571 		val64 = 0x0405000102030405ULL;
1572 		writeq(val64, &bar0->rx_w_round_robin_2);
1573 		val64 = 0x0001020304050001ULL;
1574 		writeq(val64, &bar0->rx_w_round_robin_3);
1575 		val64 = 0x0203040500000000ULL;
1576 		writeq(val64, &bar0->rx_w_round_robin_4);
1577 
1578 		val64 = 0x8080404020100804ULL;
1579 		writeq(val64, &bar0->rts_qos_steering);
1580 		break;
1581 	case 7:
1582 		val64 = 0x0001020304050600ULL;
1583 		writeq(val64, &bar0->rx_w_round_robin_0);
1584 		val64 = 0x0102030405060001ULL;
1585 		writeq(val64, &bar0->rx_w_round_robin_1);
1586 		val64 = 0x0203040506000102ULL;
1587 		writeq(val64, &bar0->rx_w_round_robin_2);
1588 		val64 = 0x0304050600010203ULL;
1589 		writeq(val64, &bar0->rx_w_round_robin_3);
1590 		val64 = 0x0405060000000000ULL;
1591 		writeq(val64, &bar0->rx_w_round_robin_4);
1592 
1593 		val64 = 0x8080402010080402ULL;
1594 		writeq(val64, &bar0->rts_qos_steering);
1595 		break;
1596 	case 8:
1597 		val64 = 0x0001020304050607ULL;
1598 		writeq(val64, &bar0->rx_w_round_robin_0);
1599 		writeq(val64, &bar0->rx_w_round_robin_1);
1600 		writeq(val64, &bar0->rx_w_round_robin_2);
1601 		writeq(val64, &bar0->rx_w_round_robin_3);
1602 		val64 = 0x0001020300000000ULL;
1603 		writeq(val64, &bar0->rx_w_round_robin_4);
1604 
1605 		val64 = 0x8040201008040201ULL;
1606 		writeq(val64, &bar0->rts_qos_steering);
1607 		break;
1608 	}
1609 
1610 	/* UDP Fix */
1611 	val64 = 0;
1612 	for (i = 0; i < 8; i++)
1613 		writeq(val64, &bar0->rts_frm_len_n[i]);
1614 
1615 	/* Set the default rts frame length for the rings configured */
1616 	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1617 	for (i = 0 ; i < config->rx_ring_num ; i++)
1618 		writeq(val64, &bar0->rts_frm_len_n[i]);
1619 
1620 	/* Set the frame length for the configured rings
1621 	 * desired by the user
1622 	 */
1623 	for (i = 0; i < config->rx_ring_num; i++) {
1624 		/* If rts_frm_len[i] == 0 then it is assumed that user not
1625 		 * specified frame length steering.
1626 		 * If the user provides the frame length then program
1627 		 * the rts_frm_len register for those values or else
1628 		 * leave it as it is.
1629 		 */
1630 		if (rts_frm_len[i] != 0) {
1631 			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1632 			       &bar0->rts_frm_len_n[i]);
1633 		}
1634 	}
1635 
1636 	/* Disable differentiated services steering logic */
1637 	for (i = 0; i < 64; i++) {
1638 		if (rts_ds_steer(nic, i, 0) == FAILURE) {
1639 			DBG_PRINT(ERR_DBG,
1640 				  "%s: rts_ds_steer failed on codepoint %d\n",
1641 				  dev->name, i);
1642 			return -ENODEV;
1643 		}
1644 	}
1645 
1646 	/* Program statistics memory */
1647 	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1648 
1649 	if (nic->device_type == XFRAME_II_DEVICE) {
1650 		val64 = STAT_BC(0x320);
1651 		writeq(val64, &bar0->stat_byte_cnt);
1652 	}
1653 
1654 	/*
1655 	 * Initializing the sampling rate for the device to calculate the
1656 	 * bandwidth utilization.
1657 	 */
1658 	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1659 		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1660 	writeq(val64, &bar0->mac_link_util);
1661 
1662 	/*
1663 	 * Initializing the Transmit and Receive Traffic Interrupt
1664 	 * Scheme.
1665 	 */
1666 
1667 	/* Initialize TTI */
1668 	if (SUCCESS != init_tti(nic, nic->last_link_state))
1669 		return -ENODEV;
1670 
1671 	/* RTI Initialization */
1672 	if (nic->device_type == XFRAME_II_DEVICE) {
1673 		/*
1674 		 * Programmed to generate Apprx 500 Intrs per
1675 		 * second
1676 		 */
1677 		int count = (nic->config.bus_speed * 125)/4;
1678 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1679 	} else
1680 		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1681 	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1682 		RTI_DATA1_MEM_RX_URNG_B(0x10) |
1683 		RTI_DATA1_MEM_RX_URNG_C(0x30) |
1684 		RTI_DATA1_MEM_RX_TIMER_AC_EN;
1685 
1686 	writeq(val64, &bar0->rti_data1_mem);
1687 
1688 	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1689 		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1690 	if (nic->config.intr_type == MSI_X)
1691 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1692 			  RTI_DATA2_MEM_RX_UFC_D(0x40));
1693 	else
1694 		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1695 			  RTI_DATA2_MEM_RX_UFC_D(0x80));
1696 	writeq(val64, &bar0->rti_data2_mem);
1697 
1698 	for (i = 0; i < config->rx_ring_num; i++) {
1699 		val64 = RTI_CMD_MEM_WE |
1700 			RTI_CMD_MEM_STROBE_NEW_CMD |
1701 			RTI_CMD_MEM_OFFSET(i);
1702 		writeq(val64, &bar0->rti_command_mem);
1703 
1704 		/*
1705 		 * Once the operation completes, the Strobe bit of the
1706 		 * command register will be reset. We poll for this
1707 		 * particular condition. We wait for a maximum of 500ms
1708 		 * for the operation to complete, if it's not complete
1709 		 * by then we return error.
1710 		 */
1711 		time = 0;
1712 		while (true) {
1713 			val64 = readq(&bar0->rti_command_mem);
1714 			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1715 				break;
1716 
1717 			if (time > 10) {
1718 				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1719 					  dev->name);
1720 				return -ENODEV;
1721 			}
1722 			time++;
1723 			msleep(50);
1724 		}
1725 	}
1726 
1727 	/*
1728 	 * Initializing proper values as Pause threshold into all
1729 	 * the 8 Queues on Rx side.
1730 	 */
1731 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1732 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1733 
1734 	/* Disable RMAC PAD STRIPPING */
1735 	add = &bar0->mac_cfg;
1736 	val64 = readq(&bar0->mac_cfg);
1737 	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1738 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1739 	writel((u32) (val64), add);
1740 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1741 	writel((u32) (val64 >> 32), (add + 4));
1742 	val64 = readq(&bar0->mac_cfg);
1743 
1744 	/* Enable FCS stripping by adapter */
1745 	add = &bar0->mac_cfg;
1746 	val64 = readq(&bar0->mac_cfg);
1747 	val64 |= MAC_CFG_RMAC_STRIP_FCS;
1748 	if (nic->device_type == XFRAME_II_DEVICE)
1749 		writeq(val64, &bar0->mac_cfg);
1750 	else {
1751 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1752 		writel((u32) (val64), add);
1753 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1754 		writel((u32) (val64 >> 32), (add + 4));
1755 	}
1756 
1757 	/*
1758 	 * Set the time value to be inserted in the pause frame
1759 	 * generated by xena.
1760 	 */
1761 	val64 = readq(&bar0->rmac_pause_cfg);
1762 	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1763 	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1764 	writeq(val64, &bar0->rmac_pause_cfg);
1765 
1766 	/*
1767 	 * Set the Threshold Limit for Generating the pause frame
1768 	 * If the amount of data in any Queue exceeds ratio of
1769 	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1770 	 * pause frame is generated
1771 	 */
1772 	val64 = 0;
1773 	for (i = 0; i < 4; i++) {
1774 		val64 |= (((u64)0xFF00 |
1775 			   nic->mac_control.mc_pause_threshold_q0q3)
1776 			  << (i * 2 * 8));
1777 	}
1778 	writeq(val64, &bar0->mc_pause_thresh_q0q3);
1779 
1780 	val64 = 0;
1781 	for (i = 0; i < 4; i++) {
1782 		val64 |= (((u64)0xFF00 |
1783 			   nic->mac_control.mc_pause_threshold_q4q7)
1784 			  << (i * 2 * 8));
1785 	}
1786 	writeq(val64, &bar0->mc_pause_thresh_q4q7);
1787 
1788 	/*
1789 	 * TxDMA will stop Read request if the number of read split has
1790 	 * exceeded the limit pointed by shared_splits
1791 	 */
1792 	val64 = readq(&bar0->pic_control);
1793 	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1794 	writeq(val64, &bar0->pic_control);
1795 
1796 	if (nic->config.bus_speed == 266) {
1797 		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1798 		writeq(0x0, &bar0->read_retry_delay);
1799 		writeq(0x0, &bar0->write_retry_delay);
1800 	}
1801 
1802 	/*
1803 	 * Programming the Herc to split every write transaction
1804 	 * that does not start on an ADB to reduce disconnects.
1805 	 */
1806 	if (nic->device_type == XFRAME_II_DEVICE) {
1807 		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1808 			MISC_LINK_STABILITY_PRD(3);
1809 		writeq(val64, &bar0->misc_control);
1810 		val64 = readq(&bar0->pic_control2);
1811 		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1812 		writeq(val64, &bar0->pic_control2);
1813 	}
1814 	if (strstr(nic->product_name, "CX4")) {
1815 		val64 = TMAC_AVG_IPG(0x17);
1816 		writeq(val64, &bar0->tmac_avg_ipg);
1817 	}
1818 
1819 	return SUCCESS;
1820 }
1821 #define LINK_UP_DOWN_INTERRUPT		1
1822 #define MAC_RMAC_ERR_TIMER		2
1823 
1824 static int s2io_link_fault_indication(struct s2io_nic *nic)
1825 {
1826 	if (nic->device_type == XFRAME_II_DEVICE)
1827 		return LINK_UP_DOWN_INTERRUPT;
1828 	else
1829 		return MAC_RMAC_ERR_TIMER;
1830 }
1831 
1832 /**
1833  *  do_s2io_write_bits -  update alarm bits in alarm register
1834  *  @value: alarm bits
1835  *  @flag: interrupt status
1836  *  @addr: address value
1837  *  Description: update alarm bits in alarm register
1838  *  Return Value:
1839  *  NONE.
1840  */
1841 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1842 {
1843 	u64 temp64;
1844 
1845 	temp64 = readq(addr);
1846 
1847 	if (flag == ENABLE_INTRS)
1848 		temp64 &= ~((u64)value);
1849 	else
1850 		temp64 |= ((u64)value);
1851 	writeq(temp64, addr);
1852 }
1853 
1854 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1855 {
1856 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1857 	register u64 gen_int_mask = 0;
1858 	u64 interruptible;
1859 
1860 	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1861 	if (mask & TX_DMA_INTR) {
1862 		gen_int_mask |= TXDMA_INT_M;
1863 
1864 		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1865 				   TXDMA_PCC_INT | TXDMA_TTI_INT |
1866 				   TXDMA_LSO_INT | TXDMA_TPA_INT |
1867 				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1868 
1869 		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1870 				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1871 				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1872 				   &bar0->pfc_err_mask);
1873 
1874 		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1875 				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1876 				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1877 
1878 		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1879 				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1880 				   PCC_N_SERR | PCC_6_COF_OV_ERR |
1881 				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1882 				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1883 				   PCC_TXB_ECC_SG_ERR,
1884 				   flag, &bar0->pcc_err_mask);
1885 
1886 		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1887 				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1888 
1889 		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1890 				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1891 				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1892 				   flag, &bar0->lso_err_mask);
1893 
1894 		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1895 				   flag, &bar0->tpa_err_mask);
1896 
1897 		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1898 	}
1899 
1900 	if (mask & TX_MAC_INTR) {
1901 		gen_int_mask |= TXMAC_INT_M;
1902 		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1903 				   &bar0->mac_int_mask);
1904 		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1905 				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1906 				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1907 				   flag, &bar0->mac_tmac_err_mask);
1908 	}
1909 
1910 	if (mask & TX_XGXS_INTR) {
1911 		gen_int_mask |= TXXGXS_INT_M;
1912 		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1913 				   &bar0->xgxs_int_mask);
1914 		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1915 				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1916 				   flag, &bar0->xgxs_txgxs_err_mask);
1917 	}
1918 
1919 	if (mask & RX_DMA_INTR) {
1920 		gen_int_mask |= RXDMA_INT_M;
1921 		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1922 				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1923 				   flag, &bar0->rxdma_int_mask);
1924 		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1925 				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1926 				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1927 				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1928 		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1929 				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1930 				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1931 				   &bar0->prc_pcix_err_mask);
1932 		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1933 				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1934 				   &bar0->rpa_err_mask);
1935 		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1936 				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1937 				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
1938 				   RDA_FRM_ECC_SG_ERR |
1939 				   RDA_MISC_ERR|RDA_PCIX_ERR,
1940 				   flag, &bar0->rda_err_mask);
1941 		do_s2io_write_bits(RTI_SM_ERR_ALARM |
1942 				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
1943 				   flag, &bar0->rti_err_mask);
1944 	}
1945 
1946 	if (mask & RX_MAC_INTR) {
1947 		gen_int_mask |= RXMAC_INT_M;
1948 		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
1949 				   &bar0->mac_int_mask);
1950 		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
1951 				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
1952 				 RMAC_DOUBLE_ECC_ERR);
1953 		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
1954 			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
1955 		do_s2io_write_bits(interruptible,
1956 				   flag, &bar0->mac_rmac_err_mask);
1957 	}
1958 
1959 	if (mask & RX_XGXS_INTR) {
1960 		gen_int_mask |= RXXGXS_INT_M;
1961 		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
1962 				   &bar0->xgxs_int_mask);
1963 		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
1964 				   &bar0->xgxs_rxgxs_err_mask);
1965 	}
1966 
1967 	if (mask & MC_INTR) {
1968 		gen_int_mask |= MC_INT_M;
1969 		do_s2io_write_bits(MC_INT_MASK_MC_INT,
1970 				   flag, &bar0->mc_int_mask);
1971 		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
1972 				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
1973 				   &bar0->mc_err_mask);
1974 	}
1975 	nic->general_int_mask = gen_int_mask;
1976 
1977 	/* Remove this line when alarm interrupts are enabled */
1978 	nic->general_int_mask = 0;
1979 }
1980 
1981 /**
1982  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1983  *  @nic: device private variable,
1984  *  @mask: A mask indicating which Intr block must be modified and,
1985  *  @flag: A flag indicating whether to enable or disable the Intrs.
1986  *  Description: This function will either disable or enable the interrupts
1987  *  depending on the flag argument. The mask argument can be used to
1988  *  enable/disable any Intr block.
1989  *  Return Value: NONE.
1990  */
1991 
1992 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1993 {
1994 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1995 	register u64 temp64 = 0, intr_mask = 0;
1996 
1997 	intr_mask = nic->general_int_mask;
1998 
1999 	/*  Top level interrupt classification */
2000 	/*  PIC Interrupts */
2001 	if (mask & TX_PIC_INTR) {
2002 		/*  Enable PIC Intrs in the general intr mask register */
2003 		intr_mask |= TXPIC_INT_M;
2004 		if (flag == ENABLE_INTRS) {
2005 			/*
2006 			 * If Hercules adapter enable GPIO otherwise
2007 			 * disable all PCIX, Flash, MDIO, IIC and GPIO
2008 			 * interrupts for now.
2009 			 * TODO
2010 			 */
2011 			if (s2io_link_fault_indication(nic) ==
2012 			    LINK_UP_DOWN_INTERRUPT) {
2013 				do_s2io_write_bits(PIC_INT_GPIO, flag,
2014 						   &bar0->pic_int_mask);
2015 				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2016 						   &bar0->gpio_int_mask);
2017 			} else
2018 				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2019 		} else if (flag == DISABLE_INTRS) {
2020 			/*
2021 			 * Disable PIC Intrs in the general
2022 			 * intr mask register
2023 			 */
2024 			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2025 		}
2026 	}
2027 
2028 	/*  Tx traffic interrupts */
2029 	if (mask & TX_TRAFFIC_INTR) {
2030 		intr_mask |= TXTRAFFIC_INT_M;
2031 		if (flag == ENABLE_INTRS) {
2032 			/*
2033 			 * Enable all the Tx side interrupts
2034 			 * writing 0 Enables all 64 TX interrupt levels
2035 			 */
2036 			writeq(0x0, &bar0->tx_traffic_mask);
2037 		} else if (flag == DISABLE_INTRS) {
2038 			/*
2039 			 * Disable Tx Traffic Intrs in the general intr mask
2040 			 * register.
2041 			 */
2042 			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2043 		}
2044 	}
2045 
2046 	/*  Rx traffic interrupts */
2047 	if (mask & RX_TRAFFIC_INTR) {
2048 		intr_mask |= RXTRAFFIC_INT_M;
2049 		if (flag == ENABLE_INTRS) {
2050 			/* writing 0 Enables all 8 RX interrupt levels */
2051 			writeq(0x0, &bar0->rx_traffic_mask);
2052 		} else if (flag == DISABLE_INTRS) {
2053 			/*
2054 			 * Disable Rx Traffic Intrs in the general intr mask
2055 			 * register.
2056 			 */
2057 			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2058 		}
2059 	}
2060 
2061 	temp64 = readq(&bar0->general_int_mask);
2062 	if (flag == ENABLE_INTRS)
2063 		temp64 &= ~((u64)intr_mask);
2064 	else
2065 		temp64 = DISABLE_ALL_INTRS;
2066 	writeq(temp64, &bar0->general_int_mask);
2067 
2068 	nic->general_int_mask = readq(&bar0->general_int_mask);
2069 }
2070 
2071 /**
2072  *  verify_pcc_quiescent- Checks for PCC quiescent state
2073  *  Return: 1 If PCC is quiescence
2074  *          0 If PCC is not quiescence
2075  */
2076 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2077 {
2078 	int ret = 0, herc;
2079 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2080 	u64 val64 = readq(&bar0->adapter_status);
2081 
2082 	herc = (sp->device_type == XFRAME_II_DEVICE);
2083 
2084 	if (flag == false) {
2085 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2086 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2087 				ret = 1;
2088 		} else {
2089 			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2090 				ret = 1;
2091 		}
2092 	} else {
2093 		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2094 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2095 			     ADAPTER_STATUS_RMAC_PCC_IDLE))
2096 				ret = 1;
2097 		} else {
2098 			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2099 			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2100 				ret = 1;
2101 		}
2102 	}
2103 
2104 	return ret;
2105 }
2106 /**
2107  *  verify_xena_quiescence - Checks whether the H/W is ready
2108  *  Description: Returns whether the H/W is ready to go or not. Depending
2109  *  on whether adapter enable bit was written or not the comparison
2110  *  differs and the calling function passes the input argument flag to
2111  *  indicate this.
2112  *  Return: 1 If xena is quiescence
2113  *          0 If Xena is not quiescence
2114  */
2115 
2116 static int verify_xena_quiescence(struct s2io_nic *sp)
2117 {
2118 	int  mode;
2119 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2120 	u64 val64 = readq(&bar0->adapter_status);
2121 	mode = s2io_verify_pci_mode(sp);
2122 
2123 	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2124 		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2125 		return 0;
2126 	}
2127 	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2128 		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2129 		return 0;
2130 	}
2131 	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2132 		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2133 		return 0;
2134 	}
2135 	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2136 		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2137 		return 0;
2138 	}
2139 	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2140 		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2141 		return 0;
2142 	}
2143 	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2144 		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2145 		return 0;
2146 	}
2147 	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2148 		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2149 		return 0;
2150 	}
2151 	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2152 		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2153 		return 0;
2154 	}
2155 
2156 	/*
2157 	 * In PCI 33 mode, the P_PLL is not used, and therefore,
2158 	 * the the P_PLL_LOCK bit in the adapter_status register will
2159 	 * not be asserted.
2160 	 */
2161 	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2162 	    sp->device_type == XFRAME_II_DEVICE &&
2163 	    mode != PCI_MODE_PCI_33) {
2164 		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2165 		return 0;
2166 	}
2167 	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2168 	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2169 		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2170 		return 0;
2171 	}
2172 	return 1;
2173 }
2174 
2175 /**
2176  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2177  * @sp: Pointer to device specifc structure
2178  * Description :
2179  * New procedure to clear mac address reading  problems on Alpha platforms
2180  *
2181  */
2182 
2183 static void fix_mac_address(struct s2io_nic *sp)
2184 {
2185 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2186 	int i = 0;
2187 
2188 	while (fix_mac[i] != END_SIGN) {
2189 		writeq(fix_mac[i++], &bar0->gpio_control);
2190 		udelay(10);
2191 		(void) readq(&bar0->gpio_control);
2192 	}
2193 }
2194 
2195 /**
2196  *  start_nic - Turns the device on
2197  *  @nic : device private variable.
2198  *  Description:
2199  *  This function actually turns the device on. Before this  function is
2200  *  called,all Registers are configured from their reset states
2201  *  and shared memory is allocated but the NIC is still quiescent. On
2202  *  calling this function, the device interrupts are cleared and the NIC is
2203  *  literally switched on by writing into the adapter control register.
2204  *  Return Value:
2205  *  SUCCESS on success and -1 on failure.
2206  */
2207 
2208 static int start_nic(struct s2io_nic *nic)
2209 {
2210 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2211 	struct net_device *dev = nic->dev;
2212 	register u64 val64 = 0;
2213 	u16 subid, i;
2214 	struct config_param *config = &nic->config;
2215 	struct mac_info *mac_control = &nic->mac_control;
2216 
2217 	/*  PRC Initialization and configuration */
2218 	for (i = 0; i < config->rx_ring_num; i++) {
2219 		struct ring_info *ring = &mac_control->rings[i];
2220 
2221 		writeq((u64)ring->rx_blocks[0].block_dma_addr,
2222 		       &bar0->prc_rxd0_n[i]);
2223 
2224 		val64 = readq(&bar0->prc_ctrl_n[i]);
2225 		if (nic->rxd_mode == RXD_MODE_1)
2226 			val64 |= PRC_CTRL_RC_ENABLED;
2227 		else
2228 			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2229 		if (nic->device_type == XFRAME_II_DEVICE)
2230 			val64 |= PRC_CTRL_GROUP_READS;
2231 		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2232 		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2233 		writeq(val64, &bar0->prc_ctrl_n[i]);
2234 	}
2235 
2236 	if (nic->rxd_mode == RXD_MODE_3B) {
2237 		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2238 		val64 = readq(&bar0->rx_pa_cfg);
2239 		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2240 		writeq(val64, &bar0->rx_pa_cfg);
2241 	}
2242 
2243 	if (vlan_tag_strip == 0) {
2244 		val64 = readq(&bar0->rx_pa_cfg);
2245 		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2246 		writeq(val64, &bar0->rx_pa_cfg);
2247 		nic->vlan_strip_flag = 0;
2248 	}
2249 
2250 	/*
2251 	 * Enabling MC-RLDRAM. After enabling the device, we timeout
2252 	 * for around 100ms, which is approximately the time required
2253 	 * for the device to be ready for operation.
2254 	 */
2255 	val64 = readq(&bar0->mc_rldram_mrs);
2256 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2257 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2258 	val64 = readq(&bar0->mc_rldram_mrs);
2259 
2260 	msleep(100);	/* Delay by around 100 ms. */
2261 
2262 	/* Enabling ECC Protection. */
2263 	val64 = readq(&bar0->adapter_control);
2264 	val64 &= ~ADAPTER_ECC_EN;
2265 	writeq(val64, &bar0->adapter_control);
2266 
2267 	/*
2268 	 * Verify if the device is ready to be enabled, if so enable
2269 	 * it.
2270 	 */
2271 	val64 = readq(&bar0->adapter_status);
2272 	if (!verify_xena_quiescence(nic)) {
2273 		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2274 			  "Adapter status reads: 0x%llx\n",
2275 			  dev->name, (unsigned long long)val64);
2276 		return FAILURE;
2277 	}
2278 
2279 	/*
2280 	 * With some switches, link might be already up at this point.
2281 	 * Because of this weird behavior, when we enable laser,
2282 	 * we may not get link. We need to handle this. We cannot
2283 	 * figure out which switch is misbehaving. So we are forced to
2284 	 * make a global change.
2285 	 */
2286 
2287 	/* Enabling Laser. */
2288 	val64 = readq(&bar0->adapter_control);
2289 	val64 |= ADAPTER_EOI_TX_ON;
2290 	writeq(val64, &bar0->adapter_control);
2291 
2292 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2293 		/*
2294 		 * Dont see link state interrupts initially on some switches,
2295 		 * so directly scheduling the link state task here.
2296 		 */
2297 		schedule_work(&nic->set_link_task);
2298 	}
2299 	/* SXE-002: Initialize link and activity LED */
2300 	subid = nic->pdev->subsystem_device;
2301 	if (((subid & 0xFF) >= 0x07) &&
2302 	    (nic->device_type == XFRAME_I_DEVICE)) {
2303 		val64 = readq(&bar0->gpio_control);
2304 		val64 |= 0x0000800000000000ULL;
2305 		writeq(val64, &bar0->gpio_control);
2306 		val64 = 0x0411040400000000ULL;
2307 		writeq(val64, (void __iomem *)bar0 + 0x2700);
2308 	}
2309 
2310 	return SUCCESS;
2311 }
2312 /**
2313  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2314  */
2315 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2316 					struct TxD *txdlp, int get_off)
2317 {
2318 	struct s2io_nic *nic = fifo_data->nic;
2319 	struct sk_buff *skb;
2320 	struct TxD *txds;
2321 	u16 j, frg_cnt;
2322 
2323 	txds = txdlp;
2324 	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2325 		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2326 				 sizeof(u64), PCI_DMA_TODEVICE);
2327 		txds++;
2328 	}
2329 
2330 	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2331 	if (!skb) {
2332 		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2333 		return NULL;
2334 	}
2335 	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2336 			 skb_headlen(skb), PCI_DMA_TODEVICE);
2337 	frg_cnt = skb_shinfo(skb)->nr_frags;
2338 	if (frg_cnt) {
2339 		txds++;
2340 		for (j = 0; j < frg_cnt; j++, txds++) {
2341 			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2342 			if (!txds->Buffer_Pointer)
2343 				break;
2344 			pci_unmap_page(nic->pdev,
2345 				       (dma_addr_t)txds->Buffer_Pointer,
2346 				       skb_frag_size(frag), PCI_DMA_TODEVICE);
2347 		}
2348 	}
2349 	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2350 	return skb;
2351 }
2352 
2353 /**
2354  *  free_tx_buffers - Free all queued Tx buffers
2355  *  @nic : device private variable.
2356  *  Description:
2357  *  Free all queued Tx buffers.
2358  *  Return Value: void
2359  */
2360 
2361 static void free_tx_buffers(struct s2io_nic *nic)
2362 {
2363 	struct net_device *dev = nic->dev;
2364 	struct sk_buff *skb;
2365 	struct TxD *txdp;
2366 	int i, j;
2367 	int cnt = 0;
2368 	struct config_param *config = &nic->config;
2369 	struct mac_info *mac_control = &nic->mac_control;
2370 	struct stat_block *stats = mac_control->stats_info;
2371 	struct swStat *swstats = &stats->sw_stat;
2372 
2373 	for (i = 0; i < config->tx_fifo_num; i++) {
2374 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2375 		struct fifo_info *fifo = &mac_control->fifos[i];
2376 		unsigned long flags;
2377 
2378 		spin_lock_irqsave(&fifo->tx_lock, flags);
2379 		for (j = 0; j < tx_cfg->fifo_len; j++) {
2380 			txdp = fifo->list_info[j].list_virt_addr;
2381 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2382 			if (skb) {
2383 				swstats->mem_freed += skb->truesize;
2384 				dev_kfree_skb(skb);
2385 				cnt++;
2386 			}
2387 		}
2388 		DBG_PRINT(INTR_DBG,
2389 			  "%s: forcibly freeing %d skbs on FIFO%d\n",
2390 			  dev->name, cnt, i);
2391 		fifo->tx_curr_get_info.offset = 0;
2392 		fifo->tx_curr_put_info.offset = 0;
2393 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
2394 	}
2395 }
2396 
2397 /**
2398  *   stop_nic -  To stop the nic
2399  *   @nic ; device private variable.
2400  *   Description:
2401  *   This function does exactly the opposite of what the start_nic()
2402  *   function does. This function is called to stop the device.
2403  *   Return Value:
2404  *   void.
2405  */
2406 
2407 static void stop_nic(struct s2io_nic *nic)
2408 {
2409 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2410 	register u64 val64 = 0;
2411 	u16 interruptible;
2412 
2413 	/*  Disable all interrupts */
2414 	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2415 	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2416 	interruptible |= TX_PIC_INTR;
2417 	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2418 
2419 	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2420 	val64 = readq(&bar0->adapter_control);
2421 	val64 &= ~(ADAPTER_CNTL_EN);
2422 	writeq(val64, &bar0->adapter_control);
2423 }
2424 
2425 /**
2426  *  fill_rx_buffers - Allocates the Rx side skbs
2427  *  @ring_info: per ring structure
2428  *  @from_card_up: If this is true, we will map the buffer to get
2429  *     the dma address for buf0 and buf1 to give it to the card.
2430  *     Else we will sync the already mapped buffer to give it to the card.
2431  *  Description:
2432  *  The function allocates Rx side skbs and puts the physical
2433  *  address of these buffers into the RxD buffer pointers, so that the NIC
2434  *  can DMA the received frame into these locations.
2435  *  The NIC supports 3 receive modes, viz
2436  *  1. single buffer,
2437  *  2. three buffer and
2438  *  3. Five buffer modes.
2439  *  Each mode defines how many fragments the received frame will be split
2440  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2441  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2442  *  is split into 3 fragments. As of now only single buffer mode is
2443  *  supported.
2444  *   Return Value:
2445  *  SUCCESS on success or an appropriate -ve value on failure.
2446  */
2447 static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2448 			   int from_card_up)
2449 {
2450 	struct sk_buff *skb;
2451 	struct RxD_t *rxdp;
2452 	int off, size, block_no, block_no1;
2453 	u32 alloc_tab = 0;
2454 	u32 alloc_cnt;
2455 	u64 tmp;
2456 	struct buffAdd *ba;
2457 	struct RxD_t *first_rxdp = NULL;
2458 	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2459 	struct RxD1 *rxdp1;
2460 	struct RxD3 *rxdp3;
2461 	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2462 
2463 	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2464 
2465 	block_no1 = ring->rx_curr_get_info.block_index;
2466 	while (alloc_tab < alloc_cnt) {
2467 		block_no = ring->rx_curr_put_info.block_index;
2468 
2469 		off = ring->rx_curr_put_info.offset;
2470 
2471 		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2472 
2473 		if ((block_no == block_no1) &&
2474 		    (off == ring->rx_curr_get_info.offset) &&
2475 		    (rxdp->Host_Control)) {
2476 			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2477 				  ring->dev->name);
2478 			goto end;
2479 		}
2480 		if (off && (off == ring->rxd_count)) {
2481 			ring->rx_curr_put_info.block_index++;
2482 			if (ring->rx_curr_put_info.block_index ==
2483 			    ring->block_count)
2484 				ring->rx_curr_put_info.block_index = 0;
2485 			block_no = ring->rx_curr_put_info.block_index;
2486 			off = 0;
2487 			ring->rx_curr_put_info.offset = off;
2488 			rxdp = ring->rx_blocks[block_no].block_virt_addr;
2489 			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2490 				  ring->dev->name, rxdp);
2491 
2492 		}
2493 
2494 		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2495 		    ((ring->rxd_mode == RXD_MODE_3B) &&
2496 		     (rxdp->Control_2 & s2BIT(0)))) {
2497 			ring->rx_curr_put_info.offset = off;
2498 			goto end;
2499 		}
2500 		/* calculate size of skb based on ring mode */
2501 		size = ring->mtu +
2502 			HEADER_ETHERNET_II_802_3_SIZE +
2503 			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2504 		if (ring->rxd_mode == RXD_MODE_1)
2505 			size += NET_IP_ALIGN;
2506 		else
2507 			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2508 
2509 		/* allocate skb */
2510 		skb = netdev_alloc_skb(nic->dev, size);
2511 		if (!skb) {
2512 			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2513 				  ring->dev->name);
2514 			if (first_rxdp) {
2515 				dma_wmb();
2516 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2517 			}
2518 			swstats->mem_alloc_fail_cnt++;
2519 
2520 			return -ENOMEM ;
2521 		}
2522 		swstats->mem_allocated += skb->truesize;
2523 
2524 		if (ring->rxd_mode == RXD_MODE_1) {
2525 			/* 1 buffer mode - normal operation mode */
2526 			rxdp1 = (struct RxD1 *)rxdp;
2527 			memset(rxdp, 0, sizeof(struct RxD1));
2528 			skb_reserve(skb, NET_IP_ALIGN);
2529 			rxdp1->Buffer0_ptr =
2530 				pci_map_single(ring->pdev, skb->data,
2531 					       size - NET_IP_ALIGN,
2532 					       PCI_DMA_FROMDEVICE);
2533 			if (pci_dma_mapping_error(nic->pdev,
2534 						  rxdp1->Buffer0_ptr))
2535 				goto pci_map_failed;
2536 
2537 			rxdp->Control_2 =
2538 				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2539 			rxdp->Host_Control = (unsigned long)skb;
2540 		} else if (ring->rxd_mode == RXD_MODE_3B) {
2541 			/*
2542 			 * 2 buffer mode -
2543 			 * 2 buffer mode provides 128
2544 			 * byte aligned receive buffers.
2545 			 */
2546 
2547 			rxdp3 = (struct RxD3 *)rxdp;
2548 			/* save buffer pointers to avoid frequent dma mapping */
2549 			Buffer0_ptr = rxdp3->Buffer0_ptr;
2550 			Buffer1_ptr = rxdp3->Buffer1_ptr;
2551 			memset(rxdp, 0, sizeof(struct RxD3));
2552 			/* restore the buffer pointers for dma sync*/
2553 			rxdp3->Buffer0_ptr = Buffer0_ptr;
2554 			rxdp3->Buffer1_ptr = Buffer1_ptr;
2555 
2556 			ba = &ring->ba[block_no][off];
2557 			skb_reserve(skb, BUF0_LEN);
2558 			tmp = (u64)(unsigned long)skb->data;
2559 			tmp += ALIGN_SIZE;
2560 			tmp &= ~ALIGN_SIZE;
2561 			skb->data = (void *) (unsigned long)tmp;
2562 			skb_reset_tail_pointer(skb);
2563 
2564 			if (from_card_up) {
2565 				rxdp3->Buffer0_ptr =
2566 					pci_map_single(ring->pdev, ba->ba_0,
2567 						       BUF0_LEN,
2568 						       PCI_DMA_FROMDEVICE);
2569 				if (pci_dma_mapping_error(nic->pdev,
2570 							  rxdp3->Buffer0_ptr))
2571 					goto pci_map_failed;
2572 			} else
2573 				pci_dma_sync_single_for_device(ring->pdev,
2574 							       (dma_addr_t)rxdp3->Buffer0_ptr,
2575 							       BUF0_LEN,
2576 							       PCI_DMA_FROMDEVICE);
2577 
2578 			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2579 			if (ring->rxd_mode == RXD_MODE_3B) {
2580 				/* Two buffer mode */
2581 
2582 				/*
2583 				 * Buffer2 will have L3/L4 header plus
2584 				 * L4 payload
2585 				 */
2586 				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
2587 								    skb->data,
2588 								    ring->mtu + 4,
2589 								    PCI_DMA_FROMDEVICE);
2590 
2591 				if (pci_dma_mapping_error(nic->pdev,
2592 							  rxdp3->Buffer2_ptr))
2593 					goto pci_map_failed;
2594 
2595 				if (from_card_up) {
2596 					rxdp3->Buffer1_ptr =
2597 						pci_map_single(ring->pdev,
2598 							       ba->ba_1,
2599 							       BUF1_LEN,
2600 							       PCI_DMA_FROMDEVICE);
2601 
2602 					if (pci_dma_mapping_error(nic->pdev,
2603 								  rxdp3->Buffer1_ptr)) {
2604 						pci_unmap_single(ring->pdev,
2605 								 (dma_addr_t)(unsigned long)
2606 								 skb->data,
2607 								 ring->mtu + 4,
2608 								 PCI_DMA_FROMDEVICE);
2609 						goto pci_map_failed;
2610 					}
2611 				}
2612 				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2613 				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2614 					(ring->mtu + 4);
2615 			}
2616 			rxdp->Control_2 |= s2BIT(0);
2617 			rxdp->Host_Control = (unsigned long) (skb);
2618 		}
2619 		if (alloc_tab & ((1 << rxsync_frequency) - 1))
2620 			rxdp->Control_1 |= RXD_OWN_XENA;
2621 		off++;
2622 		if (off == (ring->rxd_count + 1))
2623 			off = 0;
2624 		ring->rx_curr_put_info.offset = off;
2625 
2626 		rxdp->Control_2 |= SET_RXD_MARKER;
2627 		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2628 			if (first_rxdp) {
2629 				dma_wmb();
2630 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2631 			}
2632 			first_rxdp = rxdp;
2633 		}
2634 		ring->rx_bufs_left += 1;
2635 		alloc_tab++;
2636 	}
2637 
2638 end:
2639 	/* Transfer ownership of first descriptor to adapter just before
2640 	 * exiting. Before that, use memory barrier so that ownership
2641 	 * and other fields are seen by adapter correctly.
2642 	 */
2643 	if (first_rxdp) {
2644 		dma_wmb();
2645 		first_rxdp->Control_1 |= RXD_OWN_XENA;
2646 	}
2647 
2648 	return SUCCESS;
2649 
2650 pci_map_failed:
2651 	swstats->pci_map_fail_cnt++;
2652 	swstats->mem_freed += skb->truesize;
2653 	dev_kfree_skb_irq(skb);
2654 	return -ENOMEM;
2655 }
2656 
2657 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2658 {
2659 	struct net_device *dev = sp->dev;
2660 	int j;
2661 	struct sk_buff *skb;
2662 	struct RxD_t *rxdp;
2663 	struct RxD1 *rxdp1;
2664 	struct RxD3 *rxdp3;
2665 	struct mac_info *mac_control = &sp->mac_control;
2666 	struct stat_block *stats = mac_control->stats_info;
2667 	struct swStat *swstats = &stats->sw_stat;
2668 
2669 	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2670 		rxdp = mac_control->rings[ring_no].
2671 			rx_blocks[blk].rxds[j].virt_addr;
2672 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2673 		if (!skb)
2674 			continue;
2675 		if (sp->rxd_mode == RXD_MODE_1) {
2676 			rxdp1 = (struct RxD1 *)rxdp;
2677 			pci_unmap_single(sp->pdev,
2678 					 (dma_addr_t)rxdp1->Buffer0_ptr,
2679 					 dev->mtu +
2680 					 HEADER_ETHERNET_II_802_3_SIZE +
2681 					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2682 					 PCI_DMA_FROMDEVICE);
2683 			memset(rxdp, 0, sizeof(struct RxD1));
2684 		} else if (sp->rxd_mode == RXD_MODE_3B) {
2685 			rxdp3 = (struct RxD3 *)rxdp;
2686 			pci_unmap_single(sp->pdev,
2687 					 (dma_addr_t)rxdp3->Buffer0_ptr,
2688 					 BUF0_LEN,
2689 					 PCI_DMA_FROMDEVICE);
2690 			pci_unmap_single(sp->pdev,
2691 					 (dma_addr_t)rxdp3->Buffer1_ptr,
2692 					 BUF1_LEN,
2693 					 PCI_DMA_FROMDEVICE);
2694 			pci_unmap_single(sp->pdev,
2695 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2696 					 dev->mtu + 4,
2697 					 PCI_DMA_FROMDEVICE);
2698 			memset(rxdp, 0, sizeof(struct RxD3));
2699 		}
2700 		swstats->mem_freed += skb->truesize;
2701 		dev_kfree_skb(skb);
2702 		mac_control->rings[ring_no].rx_bufs_left -= 1;
2703 	}
2704 }
2705 
2706 /**
2707  *  free_rx_buffers - Frees all Rx buffers
2708  *  @sp: device private variable.
2709  *  Description:
2710  *  This function will free all Rx buffers allocated by host.
2711  *  Return Value:
2712  *  NONE.
2713  */
2714 
2715 static void free_rx_buffers(struct s2io_nic *sp)
2716 {
2717 	struct net_device *dev = sp->dev;
2718 	int i, blk = 0, buf_cnt = 0;
2719 	struct config_param *config = &sp->config;
2720 	struct mac_info *mac_control = &sp->mac_control;
2721 
2722 	for (i = 0; i < config->rx_ring_num; i++) {
2723 		struct ring_info *ring = &mac_control->rings[i];
2724 
2725 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2726 			free_rxd_blk(sp, i, blk);
2727 
2728 		ring->rx_curr_put_info.block_index = 0;
2729 		ring->rx_curr_get_info.block_index = 0;
2730 		ring->rx_curr_put_info.offset = 0;
2731 		ring->rx_curr_get_info.offset = 0;
2732 		ring->rx_bufs_left = 0;
2733 		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2734 			  dev->name, buf_cnt, i);
2735 	}
2736 }
2737 
2738 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2739 {
2740 	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2741 		DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2742 			  ring->dev->name);
2743 	}
2744 	return 0;
2745 }
2746 
2747 /**
2748  * s2io_poll - Rx interrupt handler for NAPI support
2749  * @napi : pointer to the napi structure.
2750  * @budget : The number of packets that were budgeted to be processed
2751  * during  one pass through the 'Poll" function.
2752  * Description:
2753  * Comes into picture only if NAPI support has been incorporated. It does
2754  * the same thing that rx_intr_handler does, but not in a interrupt context
2755  * also It will process only a given number of packets.
2756  * Return value:
2757  * 0 on success and 1 if there are No Rx packets to be processed.
2758  */
2759 
2760 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2761 {
2762 	struct ring_info *ring = container_of(napi, struct ring_info, napi);
2763 	struct net_device *dev = ring->dev;
2764 	int pkts_processed = 0;
2765 	u8 __iomem *addr = NULL;
2766 	u8 val8 = 0;
2767 	struct s2io_nic *nic = netdev_priv(dev);
2768 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2769 	int budget_org = budget;
2770 
2771 	if (unlikely(!is_s2io_card_up(nic)))
2772 		return 0;
2773 
2774 	pkts_processed = rx_intr_handler(ring, budget);
2775 	s2io_chk_rx_buffers(nic, ring);
2776 
2777 	if (pkts_processed < budget_org) {
2778 		napi_complete_done(napi, pkts_processed);
2779 		/*Re Enable MSI-Rx Vector*/
2780 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2781 		addr += 7 - ring->ring_no;
2782 		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2783 		writeb(val8, addr);
2784 		val8 = readb(addr);
2785 	}
2786 	return pkts_processed;
2787 }
2788 
2789 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2790 {
2791 	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2792 	int pkts_processed = 0;
2793 	int ring_pkts_processed, i;
2794 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2795 	int budget_org = budget;
2796 	struct config_param *config = &nic->config;
2797 	struct mac_info *mac_control = &nic->mac_control;
2798 
2799 	if (unlikely(!is_s2io_card_up(nic)))
2800 		return 0;
2801 
2802 	for (i = 0; i < config->rx_ring_num; i++) {
2803 		struct ring_info *ring = &mac_control->rings[i];
2804 		ring_pkts_processed = rx_intr_handler(ring, budget);
2805 		s2io_chk_rx_buffers(nic, ring);
2806 		pkts_processed += ring_pkts_processed;
2807 		budget -= ring_pkts_processed;
2808 		if (budget <= 0)
2809 			break;
2810 	}
2811 	if (pkts_processed < budget_org) {
2812 		napi_complete_done(napi, pkts_processed);
2813 		/* Re enable the Rx interrupts for the ring */
2814 		writeq(0, &bar0->rx_traffic_mask);
2815 		readl(&bar0->rx_traffic_mask);
2816 	}
2817 	return pkts_processed;
2818 }
2819 
2820 #ifdef CONFIG_NET_POLL_CONTROLLER
2821 /**
2822  * s2io_netpoll - netpoll event handler entry point
2823  * @dev : pointer to the device structure.
2824  * Description:
2825  * 	This function will be called by upper layer to check for events on the
2826  * interface in situations where interrupts are disabled. It is used for
2827  * specific in-kernel networking tasks, such as remote consoles and kernel
2828  * debugging over the network (example netdump in RedHat).
2829  */
2830 static void s2io_netpoll(struct net_device *dev)
2831 {
2832 	struct s2io_nic *nic = netdev_priv(dev);
2833 	const int irq = nic->pdev->irq;
2834 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2835 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2836 	int i;
2837 	struct config_param *config = &nic->config;
2838 	struct mac_info *mac_control = &nic->mac_control;
2839 
2840 	if (pci_channel_offline(nic->pdev))
2841 		return;
2842 
2843 	disable_irq(irq);
2844 
2845 	writeq(val64, &bar0->rx_traffic_int);
2846 	writeq(val64, &bar0->tx_traffic_int);
2847 
2848 	/* we need to free up the transmitted skbufs or else netpoll will
2849 	 * run out of skbs and will fail and eventually netpoll application such
2850 	 * as netdump will fail.
2851 	 */
2852 	for (i = 0; i < config->tx_fifo_num; i++)
2853 		tx_intr_handler(&mac_control->fifos[i]);
2854 
2855 	/* check for received packet and indicate up to network */
2856 	for (i = 0; i < config->rx_ring_num; i++) {
2857 		struct ring_info *ring = &mac_control->rings[i];
2858 
2859 		rx_intr_handler(ring, 0);
2860 	}
2861 
2862 	for (i = 0; i < config->rx_ring_num; i++) {
2863 		struct ring_info *ring = &mac_control->rings[i];
2864 
2865 		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2866 			DBG_PRINT(INFO_DBG,
2867 				  "%s: Out of memory in Rx Netpoll!!\n",
2868 				  dev->name);
2869 			break;
2870 		}
2871 	}
2872 	enable_irq(irq);
2873 }
2874 #endif
2875 
2876 /**
2877  *  rx_intr_handler - Rx interrupt handler
2878  *  @ring_info: per ring structure.
2879  *  @budget: budget for napi processing.
2880  *  Description:
2881  *  If the interrupt is because of a received frame or if the
2882  *  receive ring contains fresh as yet un-processed frames,this function is
2883  *  called. It picks out the RxD at which place the last Rx processing had
2884  *  stopped and sends the skb to the OSM's Rx handler and then increments
2885  *  the offset.
2886  *  Return Value:
2887  *  No. of napi packets processed.
2888  */
2889 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2890 {
2891 	int get_block, put_block;
2892 	struct rx_curr_get_info get_info, put_info;
2893 	struct RxD_t *rxdp;
2894 	struct sk_buff *skb;
2895 	int pkt_cnt = 0, napi_pkts = 0;
2896 	int i;
2897 	struct RxD1 *rxdp1;
2898 	struct RxD3 *rxdp3;
2899 
2900 	if (budget <= 0)
2901 		return napi_pkts;
2902 
2903 	get_info = ring_data->rx_curr_get_info;
2904 	get_block = get_info.block_index;
2905 	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2906 	put_block = put_info.block_index;
2907 	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2908 
2909 	while (RXD_IS_UP2DT(rxdp)) {
2910 		/*
2911 		 * If your are next to put index then it's
2912 		 * FIFO full condition
2913 		 */
2914 		if ((get_block == put_block) &&
2915 		    (get_info.offset + 1) == put_info.offset) {
2916 			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2917 				  ring_data->dev->name);
2918 			break;
2919 		}
2920 		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2921 		if (skb == NULL) {
2922 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2923 				  ring_data->dev->name);
2924 			return 0;
2925 		}
2926 		if (ring_data->rxd_mode == RXD_MODE_1) {
2927 			rxdp1 = (struct RxD1 *)rxdp;
2928 			pci_unmap_single(ring_data->pdev, (dma_addr_t)
2929 					 rxdp1->Buffer0_ptr,
2930 					 ring_data->mtu +
2931 					 HEADER_ETHERNET_II_802_3_SIZE +
2932 					 HEADER_802_2_SIZE +
2933 					 HEADER_SNAP_SIZE,
2934 					 PCI_DMA_FROMDEVICE);
2935 		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
2936 			rxdp3 = (struct RxD3 *)rxdp;
2937 			pci_dma_sync_single_for_cpu(ring_data->pdev,
2938 						    (dma_addr_t)rxdp3->Buffer0_ptr,
2939 						    BUF0_LEN,
2940 						    PCI_DMA_FROMDEVICE);
2941 			pci_unmap_single(ring_data->pdev,
2942 					 (dma_addr_t)rxdp3->Buffer2_ptr,
2943 					 ring_data->mtu + 4,
2944 					 PCI_DMA_FROMDEVICE);
2945 		}
2946 		prefetch(skb->data);
2947 		rx_osm_handler(ring_data, rxdp);
2948 		get_info.offset++;
2949 		ring_data->rx_curr_get_info.offset = get_info.offset;
2950 		rxdp = ring_data->rx_blocks[get_block].
2951 			rxds[get_info.offset].virt_addr;
2952 		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2953 			get_info.offset = 0;
2954 			ring_data->rx_curr_get_info.offset = get_info.offset;
2955 			get_block++;
2956 			if (get_block == ring_data->block_count)
2957 				get_block = 0;
2958 			ring_data->rx_curr_get_info.block_index = get_block;
2959 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2960 		}
2961 
2962 		if (ring_data->nic->config.napi) {
2963 			budget--;
2964 			napi_pkts++;
2965 			if (!budget)
2966 				break;
2967 		}
2968 		pkt_cnt++;
2969 		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2970 			break;
2971 	}
2972 	if (ring_data->lro) {
2973 		/* Clear all LRO sessions before exiting */
2974 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2975 			struct lro *lro = &ring_data->lro0_n[i];
2976 			if (lro->in_use) {
2977 				update_L3L4_header(ring_data->nic, lro);
2978 				queue_rx_frame(lro->parent, lro->vlan_tag);
2979 				clear_lro_session(lro);
2980 			}
2981 		}
2982 	}
2983 	return napi_pkts;
2984 }
2985 
2986 /**
2987  *  tx_intr_handler - Transmit interrupt handler
2988  *  @nic : device private variable
2989  *  Description:
2990  *  If an interrupt was raised to indicate DMA complete of the
2991  *  Tx packet, this function is called. It identifies the last TxD
2992  *  whose buffer was freed and frees all skbs whose data have already
2993  *  DMA'ed into the NICs internal memory.
2994  *  Return Value:
2995  *  NONE
2996  */
2997 
2998 static void tx_intr_handler(struct fifo_info *fifo_data)
2999 {
3000 	struct s2io_nic *nic = fifo_data->nic;
3001 	struct tx_curr_get_info get_info, put_info;
3002 	struct sk_buff *skb = NULL;
3003 	struct TxD *txdlp;
3004 	int pkt_cnt = 0;
3005 	unsigned long flags = 0;
3006 	u8 err_mask;
3007 	struct stat_block *stats = nic->mac_control.stats_info;
3008 	struct swStat *swstats = &stats->sw_stat;
3009 
3010 	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3011 		return;
3012 
3013 	get_info = fifo_data->tx_curr_get_info;
3014 	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3015 	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3016 	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3017 	       (get_info.offset != put_info.offset) &&
3018 	       (txdlp->Host_Control)) {
3019 		/* Check for TxD errors */
3020 		if (txdlp->Control_1 & TXD_T_CODE) {
3021 			unsigned long long err;
3022 			err = txdlp->Control_1 & TXD_T_CODE;
3023 			if (err & 0x1) {
3024 				swstats->parity_err_cnt++;
3025 			}
3026 
3027 			/* update t_code statistics */
3028 			err_mask = err >> 48;
3029 			switch (err_mask) {
3030 			case 2:
3031 				swstats->tx_buf_abort_cnt++;
3032 				break;
3033 
3034 			case 3:
3035 				swstats->tx_desc_abort_cnt++;
3036 				break;
3037 
3038 			case 7:
3039 				swstats->tx_parity_err_cnt++;
3040 				break;
3041 
3042 			case 10:
3043 				swstats->tx_link_loss_cnt++;
3044 				break;
3045 
3046 			case 15:
3047 				swstats->tx_list_proc_err_cnt++;
3048 				break;
3049 			}
3050 		}
3051 
3052 		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3053 		if (skb == NULL) {
3054 			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3055 			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3056 				  __func__);
3057 			return;
3058 		}
3059 		pkt_cnt++;
3060 
3061 		/* Updating the statistics block */
3062 		swstats->mem_freed += skb->truesize;
3063 		dev_kfree_skb_irq(skb);
3064 
3065 		get_info.offset++;
3066 		if (get_info.offset == get_info.fifo_len + 1)
3067 			get_info.offset = 0;
3068 		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3069 		fifo_data->tx_curr_get_info.offset = get_info.offset;
3070 	}
3071 
3072 	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3073 
3074 	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3075 }
3076 
3077 /**
3078  *  s2io_mdio_write - Function to write in to MDIO registers
3079  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3080  *  @addr     : address value
3081  *  @value    : data value
3082  *  @dev      : pointer to net_device structure
3083  *  Description:
3084  *  This function is used to write values to the MDIO registers
3085  *  NONE
3086  */
3087 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3088 			    struct net_device *dev)
3089 {
3090 	u64 val64;
3091 	struct s2io_nic *sp = netdev_priv(dev);
3092 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3093 
3094 	/* address transaction */
3095 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3096 		MDIO_MMD_DEV_ADDR(mmd_type) |
3097 		MDIO_MMS_PRT_ADDR(0x0);
3098 	writeq(val64, &bar0->mdio_control);
3099 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3100 	writeq(val64, &bar0->mdio_control);
3101 	udelay(100);
3102 
3103 	/* Data transaction */
3104 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3105 		MDIO_MMD_DEV_ADDR(mmd_type) |
3106 		MDIO_MMS_PRT_ADDR(0x0) |
3107 		MDIO_MDIO_DATA(value) |
3108 		MDIO_OP(MDIO_OP_WRITE_TRANS);
3109 	writeq(val64, &bar0->mdio_control);
3110 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3111 	writeq(val64, &bar0->mdio_control);
3112 	udelay(100);
3113 
3114 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3115 		MDIO_MMD_DEV_ADDR(mmd_type) |
3116 		MDIO_MMS_PRT_ADDR(0x0) |
3117 		MDIO_OP(MDIO_OP_READ_TRANS);
3118 	writeq(val64, &bar0->mdio_control);
3119 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3120 	writeq(val64, &bar0->mdio_control);
3121 	udelay(100);
3122 }
3123 
3124 /**
3125  *  s2io_mdio_read - Function to write in to MDIO registers
3126  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3127  *  @addr     : address value
3128  *  @dev      : pointer to net_device structure
3129  *  Description:
3130  *  This function is used to read values to the MDIO registers
3131  *  NONE
3132  */
3133 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3134 {
3135 	u64 val64 = 0x0;
3136 	u64 rval64 = 0x0;
3137 	struct s2io_nic *sp = netdev_priv(dev);
3138 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3139 
3140 	/* address transaction */
3141 	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3142 			 | MDIO_MMD_DEV_ADDR(mmd_type)
3143 			 | MDIO_MMS_PRT_ADDR(0x0));
3144 	writeq(val64, &bar0->mdio_control);
3145 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3146 	writeq(val64, &bar0->mdio_control);
3147 	udelay(100);
3148 
3149 	/* Data transaction */
3150 	val64 = MDIO_MMD_INDX_ADDR(addr) |
3151 		MDIO_MMD_DEV_ADDR(mmd_type) |
3152 		MDIO_MMS_PRT_ADDR(0x0) |
3153 		MDIO_OP(MDIO_OP_READ_TRANS);
3154 	writeq(val64, &bar0->mdio_control);
3155 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3156 	writeq(val64, &bar0->mdio_control);
3157 	udelay(100);
3158 
3159 	/* Read the value from regs */
3160 	rval64 = readq(&bar0->mdio_control);
3161 	rval64 = rval64 & 0xFFFF0000;
3162 	rval64 = rval64 >> 16;
3163 	return rval64;
3164 }
3165 
3166 /**
3167  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3168  *  @counter      : counter value to be updated
3169  *  @flag         : flag to indicate the status
3170  *  @type         : counter type
3171  *  Description:
3172  *  This function is to check the status of the xpak counters value
3173  *  NONE
3174  */
3175 
3176 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3177 				  u16 flag, u16 type)
3178 {
3179 	u64 mask = 0x3;
3180 	u64 val64;
3181 	int i;
3182 	for (i = 0; i < index; i++)
3183 		mask = mask << 0x2;
3184 
3185 	if (flag > 0) {
3186 		*counter = *counter + 1;
3187 		val64 = *regs_stat & mask;
3188 		val64 = val64 >> (index * 0x2);
3189 		val64 = val64 + 1;
3190 		if (val64 == 3) {
3191 			switch (type) {
3192 			case 1:
3193 				DBG_PRINT(ERR_DBG,
3194 					  "Take Xframe NIC out of service.\n");
3195 				DBG_PRINT(ERR_DBG,
3196 "Excessive temperatures may result in premature transceiver failure.\n");
3197 				break;
3198 			case 2:
3199 				DBG_PRINT(ERR_DBG,
3200 					  "Take Xframe NIC out of service.\n");
3201 				DBG_PRINT(ERR_DBG,
3202 "Excessive bias currents may indicate imminent laser diode failure.\n");
3203 				break;
3204 			case 3:
3205 				DBG_PRINT(ERR_DBG,
3206 					  "Take Xframe NIC out of service.\n");
3207 				DBG_PRINT(ERR_DBG,
3208 "Excessive laser output power may saturate far-end receiver.\n");
3209 				break;
3210 			default:
3211 				DBG_PRINT(ERR_DBG,
3212 					  "Incorrect XPAK Alarm type\n");
3213 			}
3214 			val64 = 0x0;
3215 		}
3216 		val64 = val64 << (index * 0x2);
3217 		*regs_stat = (*regs_stat & (~mask)) | (val64);
3218 
3219 	} else {
3220 		*regs_stat = *regs_stat & (~mask);
3221 	}
3222 }
3223 
3224 /**
3225  *  s2io_updt_xpak_counter - Function to update the xpak counters
3226  *  @dev         : pointer to net_device struct
3227  *  Description:
3228  *  This function is to upate the status of the xpak counters value
3229  *  NONE
3230  */
3231 static void s2io_updt_xpak_counter(struct net_device *dev)
3232 {
3233 	u16 flag  = 0x0;
3234 	u16 type  = 0x0;
3235 	u16 val16 = 0x0;
3236 	u64 val64 = 0x0;
3237 	u64 addr  = 0x0;
3238 
3239 	struct s2io_nic *sp = netdev_priv(dev);
3240 	struct stat_block *stats = sp->mac_control.stats_info;
3241 	struct xpakStat *xstats = &stats->xpak_stat;
3242 
3243 	/* Check the communication with the MDIO slave */
3244 	addr = MDIO_CTRL1;
3245 	val64 = 0x0;
3246 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3247 	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3248 		DBG_PRINT(ERR_DBG,
3249 			  "ERR: MDIO slave access failed - Returned %llx\n",
3250 			  (unsigned long long)val64);
3251 		return;
3252 	}
3253 
3254 	/* Check for the expected value of control reg 1 */
3255 	if (val64 != MDIO_CTRL1_SPEED10G) {
3256 		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3257 			  "Returned: %llx- Expected: 0x%x\n",
3258 			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3259 		return;
3260 	}
3261 
3262 	/* Loading the DOM register to MDIO register */
3263 	addr = 0xA100;
3264 	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3265 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3266 
3267 	/* Reading the Alarm flags */
3268 	addr = 0xA070;
3269 	val64 = 0x0;
3270 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3271 
3272 	flag = CHECKBIT(val64, 0x7);
3273 	type = 1;
3274 	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3275 			      &xstats->xpak_regs_stat,
3276 			      0x0, flag, type);
3277 
3278 	if (CHECKBIT(val64, 0x6))
3279 		xstats->alarm_transceiver_temp_low++;
3280 
3281 	flag = CHECKBIT(val64, 0x3);
3282 	type = 2;
3283 	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3284 			      &xstats->xpak_regs_stat,
3285 			      0x2, flag, type);
3286 
3287 	if (CHECKBIT(val64, 0x2))
3288 		xstats->alarm_laser_bias_current_low++;
3289 
3290 	flag = CHECKBIT(val64, 0x1);
3291 	type = 3;
3292 	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3293 			      &xstats->xpak_regs_stat,
3294 			      0x4, flag, type);
3295 
3296 	if (CHECKBIT(val64, 0x0))
3297 		xstats->alarm_laser_output_power_low++;
3298 
3299 	/* Reading the Warning flags */
3300 	addr = 0xA074;
3301 	val64 = 0x0;
3302 	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3303 
3304 	if (CHECKBIT(val64, 0x7))
3305 		xstats->warn_transceiver_temp_high++;
3306 
3307 	if (CHECKBIT(val64, 0x6))
3308 		xstats->warn_transceiver_temp_low++;
3309 
3310 	if (CHECKBIT(val64, 0x3))
3311 		xstats->warn_laser_bias_current_high++;
3312 
3313 	if (CHECKBIT(val64, 0x2))
3314 		xstats->warn_laser_bias_current_low++;
3315 
3316 	if (CHECKBIT(val64, 0x1))
3317 		xstats->warn_laser_output_power_high++;
3318 
3319 	if (CHECKBIT(val64, 0x0))
3320 		xstats->warn_laser_output_power_low++;
3321 }
3322 
3323 /**
3324  *  wait_for_cmd_complete - waits for a command to complete.
3325  *  @sp : private member of the device structure, which is a pointer to the
3326  *  s2io_nic structure.
3327  *  Description: Function that waits for a command to Write into RMAC
3328  *  ADDR DATA registers to be completed and returns either success or
3329  *  error depending on whether the command was complete or not.
3330  *  Return value:
3331  *   SUCCESS on success and FAILURE on failure.
3332  */
3333 
3334 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3335 				 int bit_state)
3336 {
3337 	int ret = FAILURE, cnt = 0, delay = 1;
3338 	u64 val64;
3339 
3340 	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3341 		return FAILURE;
3342 
3343 	do {
3344 		val64 = readq(addr);
3345 		if (bit_state == S2IO_BIT_RESET) {
3346 			if (!(val64 & busy_bit)) {
3347 				ret = SUCCESS;
3348 				break;
3349 			}
3350 		} else {
3351 			if (val64 & busy_bit) {
3352 				ret = SUCCESS;
3353 				break;
3354 			}
3355 		}
3356 
3357 		if (in_interrupt())
3358 			mdelay(delay);
3359 		else
3360 			msleep(delay);
3361 
3362 		if (++cnt >= 10)
3363 			delay = 50;
3364 	} while (cnt < 20);
3365 	return ret;
3366 }
3367 /**
3368  * check_pci_device_id - Checks if the device id is supported
3369  * @id : device id
3370  * Description: Function to check if the pci device id is supported by driver.
3371  * Return value: Actual device id if supported else PCI_ANY_ID
3372  */
3373 static u16 check_pci_device_id(u16 id)
3374 {
3375 	switch (id) {
3376 	case PCI_DEVICE_ID_HERC_WIN:
3377 	case PCI_DEVICE_ID_HERC_UNI:
3378 		return XFRAME_II_DEVICE;
3379 	case PCI_DEVICE_ID_S2IO_UNI:
3380 	case PCI_DEVICE_ID_S2IO_WIN:
3381 		return XFRAME_I_DEVICE;
3382 	default:
3383 		return PCI_ANY_ID;
3384 	}
3385 }
3386 
3387 /**
3388  *  s2io_reset - Resets the card.
3389  *  @sp : private member of the device structure.
3390  *  Description: Function to Reset the card. This function then also
3391  *  restores the previously saved PCI configuration space registers as
3392  *  the card reset also resets the configuration space.
3393  *  Return value:
3394  *  void.
3395  */
3396 
3397 static void s2io_reset(struct s2io_nic *sp)
3398 {
3399 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3400 	u64 val64;
3401 	u16 subid, pci_cmd;
3402 	int i;
3403 	u16 val16;
3404 	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3405 	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3406 	struct stat_block *stats;
3407 	struct swStat *swstats;
3408 
3409 	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3410 		  __func__, pci_name(sp->pdev));
3411 
3412 	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3413 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3414 
3415 	val64 = SW_RESET_ALL;
3416 	writeq(val64, &bar0->sw_reset);
3417 	if (strstr(sp->product_name, "CX4"))
3418 		msleep(750);
3419 	msleep(250);
3420 	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3421 
3422 		/* Restore the PCI state saved during initialization. */
3423 		pci_restore_state(sp->pdev);
3424 		pci_save_state(sp->pdev);
3425 		pci_read_config_word(sp->pdev, 0x2, &val16);
3426 		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3427 			break;
3428 		msleep(200);
3429 	}
3430 
3431 	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3432 		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3433 
3434 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3435 
3436 	s2io_init_pci(sp);
3437 
3438 	/* Set swapper to enable I/O register access */
3439 	s2io_set_swapper(sp);
3440 
3441 	/* restore mac_addr entries */
3442 	do_s2io_restore_unicast_mc(sp);
3443 
3444 	/* Restore the MSIX table entries from local variables */
3445 	restore_xmsi_data(sp);
3446 
3447 	/* Clear certain PCI/PCI-X fields after reset */
3448 	if (sp->device_type == XFRAME_II_DEVICE) {
3449 		/* Clear "detected parity error" bit */
3450 		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3451 
3452 		/* Clearing PCIX Ecc status register */
3453 		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3454 
3455 		/* Clearing PCI_STATUS error reflected here */
3456 		writeq(s2BIT(62), &bar0->txpic_int_reg);
3457 	}
3458 
3459 	/* Reset device statistics maintained by OS */
3460 	memset(&sp->stats, 0, sizeof(struct net_device_stats));
3461 
3462 	stats = sp->mac_control.stats_info;
3463 	swstats = &stats->sw_stat;
3464 
3465 	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3466 	up_cnt = swstats->link_up_cnt;
3467 	down_cnt = swstats->link_down_cnt;
3468 	up_time = swstats->link_up_time;
3469 	down_time = swstats->link_down_time;
3470 	reset_cnt = swstats->soft_reset_cnt;
3471 	mem_alloc_cnt = swstats->mem_allocated;
3472 	mem_free_cnt = swstats->mem_freed;
3473 	watchdog_cnt = swstats->watchdog_timer_cnt;
3474 
3475 	memset(stats, 0, sizeof(struct stat_block));
3476 
3477 	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3478 	swstats->link_up_cnt = up_cnt;
3479 	swstats->link_down_cnt = down_cnt;
3480 	swstats->link_up_time = up_time;
3481 	swstats->link_down_time = down_time;
3482 	swstats->soft_reset_cnt = reset_cnt;
3483 	swstats->mem_allocated = mem_alloc_cnt;
3484 	swstats->mem_freed = mem_free_cnt;
3485 	swstats->watchdog_timer_cnt = watchdog_cnt;
3486 
3487 	/* SXE-002: Configure link and activity LED to turn it off */
3488 	subid = sp->pdev->subsystem_device;
3489 	if (((subid & 0xFF) >= 0x07) &&
3490 	    (sp->device_type == XFRAME_I_DEVICE)) {
3491 		val64 = readq(&bar0->gpio_control);
3492 		val64 |= 0x0000800000000000ULL;
3493 		writeq(val64, &bar0->gpio_control);
3494 		val64 = 0x0411040400000000ULL;
3495 		writeq(val64, (void __iomem *)bar0 + 0x2700);
3496 	}
3497 
3498 	/*
3499 	 * Clear spurious ECC interrupts that would have occurred on
3500 	 * XFRAME II cards after reset.
3501 	 */
3502 	if (sp->device_type == XFRAME_II_DEVICE) {
3503 		val64 = readq(&bar0->pcc_err_reg);
3504 		writeq(val64, &bar0->pcc_err_reg);
3505 	}
3506 
3507 	sp->device_enabled_once = false;
3508 }
3509 
3510 /**
3511  *  s2io_set_swapper - to set the swapper controle on the card
3512  *  @sp : private member of the device structure,
3513  *  pointer to the s2io_nic structure.
3514  *  Description: Function to set the swapper control on the card
3515  *  correctly depending on the 'endianness' of the system.
3516  *  Return value:
3517  *  SUCCESS on success and FAILURE on failure.
3518  */
3519 
3520 static int s2io_set_swapper(struct s2io_nic *sp)
3521 {
3522 	struct net_device *dev = sp->dev;
3523 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3524 	u64 val64, valt, valr;
3525 
3526 	/*
3527 	 * Set proper endian settings and verify the same by reading
3528 	 * the PIF Feed-back register.
3529 	 */
3530 
3531 	val64 = readq(&bar0->pif_rd_swapper_fb);
3532 	if (val64 != 0x0123456789ABCDEFULL) {
3533 		int i = 0;
3534 		static const u64 value[] = {
3535 			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
3536 			0x8100008181000081ULL,	/* FE=1, SE=0 */
3537 			0x4200004242000042ULL,	/* FE=0, SE=1 */
3538 			0			/* FE=0, SE=0 */
3539 		};
3540 
3541 		while (i < 4) {
3542 			writeq(value[i], &bar0->swapper_ctrl);
3543 			val64 = readq(&bar0->pif_rd_swapper_fb);
3544 			if (val64 == 0x0123456789ABCDEFULL)
3545 				break;
3546 			i++;
3547 		}
3548 		if (i == 4) {
3549 			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3550 				  "feedback read %llx\n",
3551 				  dev->name, (unsigned long long)val64);
3552 			return FAILURE;
3553 		}
3554 		valr = value[i];
3555 	} else {
3556 		valr = readq(&bar0->swapper_ctrl);
3557 	}
3558 
3559 	valt = 0x0123456789ABCDEFULL;
3560 	writeq(valt, &bar0->xmsi_address);
3561 	val64 = readq(&bar0->xmsi_address);
3562 
3563 	if (val64 != valt) {
3564 		int i = 0;
3565 		static const u64 value[] = {
3566 			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
3567 			0x0081810000818100ULL,	/* FE=1, SE=0 */
3568 			0x0042420000424200ULL,	/* FE=0, SE=1 */
3569 			0			/* FE=0, SE=0 */
3570 		};
3571 
3572 		while (i < 4) {
3573 			writeq((value[i] | valr), &bar0->swapper_ctrl);
3574 			writeq(valt, &bar0->xmsi_address);
3575 			val64 = readq(&bar0->xmsi_address);
3576 			if (val64 == valt)
3577 				break;
3578 			i++;
3579 		}
3580 		if (i == 4) {
3581 			unsigned long long x = val64;
3582 			DBG_PRINT(ERR_DBG,
3583 				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
3584 			return FAILURE;
3585 		}
3586 	}
3587 	val64 = readq(&bar0->swapper_ctrl);
3588 	val64 &= 0xFFFF000000000000ULL;
3589 
3590 #ifdef __BIG_ENDIAN
3591 	/*
3592 	 * The device by default set to a big endian format, so a
3593 	 * big endian driver need not set anything.
3594 	 */
3595 	val64 |= (SWAPPER_CTRL_TXP_FE |
3596 		  SWAPPER_CTRL_TXP_SE |
3597 		  SWAPPER_CTRL_TXD_R_FE |
3598 		  SWAPPER_CTRL_TXD_W_FE |
3599 		  SWAPPER_CTRL_TXF_R_FE |
3600 		  SWAPPER_CTRL_RXD_R_FE |
3601 		  SWAPPER_CTRL_RXD_W_FE |
3602 		  SWAPPER_CTRL_RXF_W_FE |
3603 		  SWAPPER_CTRL_XMSI_FE |
3604 		  SWAPPER_CTRL_STATS_FE |
3605 		  SWAPPER_CTRL_STATS_SE);
3606 	if (sp->config.intr_type == INTA)
3607 		val64 |= SWAPPER_CTRL_XMSI_SE;
3608 	writeq(val64, &bar0->swapper_ctrl);
3609 #else
3610 	/*
3611 	 * Initially we enable all bits to make it accessible by the
3612 	 * driver, then we selectively enable only those bits that
3613 	 * we want to set.
3614 	 */
3615 	val64 |= (SWAPPER_CTRL_TXP_FE |
3616 		  SWAPPER_CTRL_TXP_SE |
3617 		  SWAPPER_CTRL_TXD_R_FE |
3618 		  SWAPPER_CTRL_TXD_R_SE |
3619 		  SWAPPER_CTRL_TXD_W_FE |
3620 		  SWAPPER_CTRL_TXD_W_SE |
3621 		  SWAPPER_CTRL_TXF_R_FE |
3622 		  SWAPPER_CTRL_RXD_R_FE |
3623 		  SWAPPER_CTRL_RXD_R_SE |
3624 		  SWAPPER_CTRL_RXD_W_FE |
3625 		  SWAPPER_CTRL_RXD_W_SE |
3626 		  SWAPPER_CTRL_RXF_W_FE |
3627 		  SWAPPER_CTRL_XMSI_FE |
3628 		  SWAPPER_CTRL_STATS_FE |
3629 		  SWAPPER_CTRL_STATS_SE);
3630 	if (sp->config.intr_type == INTA)
3631 		val64 |= SWAPPER_CTRL_XMSI_SE;
3632 	writeq(val64, &bar0->swapper_ctrl);
3633 #endif
3634 	val64 = readq(&bar0->swapper_ctrl);
3635 
3636 	/*
3637 	 * Verifying if endian settings are accurate by reading a
3638 	 * feedback register.
3639 	 */
3640 	val64 = readq(&bar0->pif_rd_swapper_fb);
3641 	if (val64 != 0x0123456789ABCDEFULL) {
3642 		/* Endian settings are incorrect, calls for another dekko. */
3643 		DBG_PRINT(ERR_DBG,
3644 			  "%s: Endian settings are wrong, feedback read %llx\n",
3645 			  dev->name, (unsigned long long)val64);
3646 		return FAILURE;
3647 	}
3648 
3649 	return SUCCESS;
3650 }
3651 
3652 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3653 {
3654 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3655 	u64 val64;
3656 	int ret = 0, cnt = 0;
3657 
3658 	do {
3659 		val64 = readq(&bar0->xmsi_access);
3660 		if (!(val64 & s2BIT(15)))
3661 			break;
3662 		mdelay(1);
3663 		cnt++;
3664 	} while (cnt < 5);
3665 	if (cnt == 5) {
3666 		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3667 		ret = 1;
3668 	}
3669 
3670 	return ret;
3671 }
3672 
3673 static void restore_xmsi_data(struct s2io_nic *nic)
3674 {
3675 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3676 	u64 val64;
3677 	int i, msix_index;
3678 
3679 	if (nic->device_type == XFRAME_I_DEVICE)
3680 		return;
3681 
3682 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3683 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3684 		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3685 		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3686 		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3687 		writeq(val64, &bar0->xmsi_access);
3688 		if (wait_for_msix_trans(nic, msix_index)) {
3689 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3690 				  __func__, msix_index);
3691 			continue;
3692 		}
3693 	}
3694 }
3695 
3696 static void store_xmsi_data(struct s2io_nic *nic)
3697 {
3698 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3699 	u64 val64, addr, data;
3700 	int i, msix_index;
3701 
3702 	if (nic->device_type == XFRAME_I_DEVICE)
3703 		return;
3704 
3705 	/* Store and display */
3706 	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3707 		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3708 		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3709 		writeq(val64, &bar0->xmsi_access);
3710 		if (wait_for_msix_trans(nic, msix_index)) {
3711 			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3712 				  __func__, msix_index);
3713 			continue;
3714 		}
3715 		addr = readq(&bar0->xmsi_address);
3716 		data = readq(&bar0->xmsi_data);
3717 		if (addr && data) {
3718 			nic->msix_info[i].addr = addr;
3719 			nic->msix_info[i].data = data;
3720 		}
3721 	}
3722 }
3723 
3724 static int s2io_enable_msi_x(struct s2io_nic *nic)
3725 {
3726 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3727 	u64 rx_mat;
3728 	u16 msi_control; /* Temp variable */
3729 	int ret, i, j, msix_indx = 1;
3730 	int size;
3731 	struct stat_block *stats = nic->mac_control.stats_info;
3732 	struct swStat *swstats = &stats->sw_stat;
3733 
3734 	size = nic->num_entries * sizeof(struct msix_entry);
3735 	nic->entries = kzalloc(size, GFP_KERNEL);
3736 	if (!nic->entries) {
3737 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3738 			  __func__);
3739 		swstats->mem_alloc_fail_cnt++;
3740 		return -ENOMEM;
3741 	}
3742 	swstats->mem_allocated += size;
3743 
3744 	size = nic->num_entries * sizeof(struct s2io_msix_entry);
3745 	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3746 	if (!nic->s2io_entries) {
3747 		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3748 			  __func__);
3749 		swstats->mem_alloc_fail_cnt++;
3750 		kfree(nic->entries);
3751 		swstats->mem_freed
3752 			+= (nic->num_entries * sizeof(struct msix_entry));
3753 		return -ENOMEM;
3754 	}
3755 	swstats->mem_allocated += size;
3756 
3757 	nic->entries[0].entry = 0;
3758 	nic->s2io_entries[0].entry = 0;
3759 	nic->s2io_entries[0].in_use = MSIX_FLG;
3760 	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3761 	nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3762 
3763 	for (i = 1; i < nic->num_entries; i++) {
3764 		nic->entries[i].entry = ((i - 1) * 8) + 1;
3765 		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3766 		nic->s2io_entries[i].arg = NULL;
3767 		nic->s2io_entries[i].in_use = 0;
3768 	}
3769 
3770 	rx_mat = readq(&bar0->rx_mat);
3771 	for (j = 0; j < nic->config.rx_ring_num; j++) {
3772 		rx_mat |= RX_MAT_SET(j, msix_indx);
3773 		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3774 		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3775 		nic->s2io_entries[j+1].in_use = MSIX_FLG;
3776 		msix_indx += 8;
3777 	}
3778 	writeq(rx_mat, &bar0->rx_mat);
3779 	readq(&bar0->rx_mat);
3780 
3781 	ret = pci_enable_msix_range(nic->pdev, nic->entries,
3782 				    nic->num_entries, nic->num_entries);
3783 	/* We fail init if error or we get less vectors than min required */
3784 	if (ret < 0) {
3785 		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3786 		kfree(nic->entries);
3787 		swstats->mem_freed += nic->num_entries *
3788 			sizeof(struct msix_entry);
3789 		kfree(nic->s2io_entries);
3790 		swstats->mem_freed += nic->num_entries *
3791 			sizeof(struct s2io_msix_entry);
3792 		nic->entries = NULL;
3793 		nic->s2io_entries = NULL;
3794 		return -ENOMEM;
3795 	}
3796 
3797 	/*
3798 	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3799 	 * in the herc NIC. (Temp change, needs to be removed later)
3800 	 */
3801 	pci_read_config_word(nic->pdev, 0x42, &msi_control);
3802 	msi_control |= 0x1; /* Enable MSI */
3803 	pci_write_config_word(nic->pdev, 0x42, msi_control);
3804 
3805 	return 0;
3806 }
3807 
3808 /* Handle software interrupt used during MSI(X) test */
3809 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3810 {
3811 	struct s2io_nic *sp = dev_id;
3812 
3813 	sp->msi_detected = 1;
3814 	wake_up(&sp->msi_wait);
3815 
3816 	return IRQ_HANDLED;
3817 }
3818 
3819 /* Test interrupt path by forcing a a software IRQ */
3820 static int s2io_test_msi(struct s2io_nic *sp)
3821 {
3822 	struct pci_dev *pdev = sp->pdev;
3823 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3824 	int err;
3825 	u64 val64, saved64;
3826 
3827 	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3828 			  sp->name, sp);
3829 	if (err) {
3830 		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3831 			  sp->dev->name, pci_name(pdev), pdev->irq);
3832 		return err;
3833 	}
3834 
3835 	init_waitqueue_head(&sp->msi_wait);
3836 	sp->msi_detected = 0;
3837 
3838 	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3839 	val64 |= SCHED_INT_CTRL_ONE_SHOT;
3840 	val64 |= SCHED_INT_CTRL_TIMER_EN;
3841 	val64 |= SCHED_INT_CTRL_INT2MSI(1);
3842 	writeq(val64, &bar0->scheduled_int_ctrl);
3843 
3844 	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3845 
3846 	if (!sp->msi_detected) {
3847 		/* MSI(X) test failed, go back to INTx mode */
3848 		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3849 			  "using MSI(X) during test\n",
3850 			  sp->dev->name, pci_name(pdev));
3851 
3852 		err = -EOPNOTSUPP;
3853 	}
3854 
3855 	free_irq(sp->entries[1].vector, sp);
3856 
3857 	writeq(saved64, &bar0->scheduled_int_ctrl);
3858 
3859 	return err;
3860 }
3861 
3862 static void remove_msix_isr(struct s2io_nic *sp)
3863 {
3864 	int i;
3865 	u16 msi_control;
3866 
3867 	for (i = 0; i < sp->num_entries; i++) {
3868 		if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3869 			int vector = sp->entries[i].vector;
3870 			void *arg = sp->s2io_entries[i].arg;
3871 			free_irq(vector, arg);
3872 		}
3873 	}
3874 
3875 	kfree(sp->entries);
3876 	kfree(sp->s2io_entries);
3877 	sp->entries = NULL;
3878 	sp->s2io_entries = NULL;
3879 
3880 	pci_read_config_word(sp->pdev, 0x42, &msi_control);
3881 	msi_control &= 0xFFFE; /* Disable MSI */
3882 	pci_write_config_word(sp->pdev, 0x42, msi_control);
3883 
3884 	pci_disable_msix(sp->pdev);
3885 }
3886 
3887 static void remove_inta_isr(struct s2io_nic *sp)
3888 {
3889 	free_irq(sp->pdev->irq, sp->dev);
3890 }
3891 
3892 /* ********************************************************* *
3893  * Functions defined below concern the OS part of the driver *
3894  * ********************************************************* */
3895 
3896 /**
3897  *  s2io_open - open entry point of the driver
3898  *  @dev : pointer to the device structure.
3899  *  Description:
3900  *  This function is the open entry point of the driver. It mainly calls a
3901  *  function to allocate Rx buffers and inserts them into the buffer
3902  *  descriptors and then enables the Rx part of the NIC.
3903  *  Return value:
3904  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3905  *   file on failure.
3906  */
3907 
3908 static int s2io_open(struct net_device *dev)
3909 {
3910 	struct s2io_nic *sp = netdev_priv(dev);
3911 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3912 	int err = 0;
3913 
3914 	/*
3915 	 * Make sure you have link off by default every time
3916 	 * Nic is initialized
3917 	 */
3918 	netif_carrier_off(dev);
3919 	sp->last_link_state = 0;
3920 
3921 	/* Initialize H/W and enable interrupts */
3922 	err = s2io_card_up(sp);
3923 	if (err) {
3924 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3925 			  dev->name);
3926 		goto hw_init_failed;
3927 	}
3928 
3929 	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3930 		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3931 		s2io_card_down(sp);
3932 		err = -ENODEV;
3933 		goto hw_init_failed;
3934 	}
3935 	s2io_start_all_tx_queue(sp);
3936 	return 0;
3937 
3938 hw_init_failed:
3939 	if (sp->config.intr_type == MSI_X) {
3940 		if (sp->entries) {
3941 			kfree(sp->entries);
3942 			swstats->mem_freed += sp->num_entries *
3943 				sizeof(struct msix_entry);
3944 		}
3945 		if (sp->s2io_entries) {
3946 			kfree(sp->s2io_entries);
3947 			swstats->mem_freed += sp->num_entries *
3948 				sizeof(struct s2io_msix_entry);
3949 		}
3950 	}
3951 	return err;
3952 }
3953 
3954 /**
3955  *  s2io_close -close entry point of the driver
3956  *  @dev : device pointer.
3957  *  Description:
3958  *  This is the stop entry point of the driver. It needs to undo exactly
3959  *  whatever was done by the open entry point,thus it's usually referred to
3960  *  as the close function.Among other things this function mainly stops the
3961  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3962  *  Return value:
3963  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3964  *  file on failure.
3965  */
3966 
3967 static int s2io_close(struct net_device *dev)
3968 {
3969 	struct s2io_nic *sp = netdev_priv(dev);
3970 	struct config_param *config = &sp->config;
3971 	u64 tmp64;
3972 	int offset;
3973 
3974 	/* Return if the device is already closed               *
3975 	 *  Can happen when s2io_card_up failed in change_mtu    *
3976 	 */
3977 	if (!is_s2io_card_up(sp))
3978 		return 0;
3979 
3980 	s2io_stop_all_tx_queue(sp);
3981 	/* delete all populated mac entries */
3982 	for (offset = 1; offset < config->max_mc_addr; offset++) {
3983 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
3984 		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3985 			do_s2io_delete_unicast_mc(sp, tmp64);
3986 	}
3987 
3988 	s2io_card_down(sp);
3989 
3990 	return 0;
3991 }
3992 
3993 /**
3994  *  s2io_xmit - Tx entry point of te driver
3995  *  @skb : the socket buffer containing the Tx data.
3996  *  @dev : device pointer.
3997  *  Description :
3998  *  This function is the Tx entry point of the driver. S2IO NIC supports
3999  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
4000  *  NOTE: when device can't queue the pkt,just the trans_start variable will
4001  *  not be upadted.
4002  *  Return value:
4003  *  0 on success & 1 on failure.
4004  */
4005 
4006 static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4007 {
4008 	struct s2io_nic *sp = netdev_priv(dev);
4009 	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4010 	register u64 val64;
4011 	struct TxD *txdp;
4012 	struct TxFIFO_element __iomem *tx_fifo;
4013 	unsigned long flags = 0;
4014 	u16 vlan_tag = 0;
4015 	struct fifo_info *fifo = NULL;
4016 	int offload_type;
4017 	int enable_per_list_interrupt = 0;
4018 	struct config_param *config = &sp->config;
4019 	struct mac_info *mac_control = &sp->mac_control;
4020 	struct stat_block *stats = mac_control->stats_info;
4021 	struct swStat *swstats = &stats->sw_stat;
4022 
4023 	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4024 
4025 	if (unlikely(skb->len <= 0)) {
4026 		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4027 		dev_kfree_skb_any(skb);
4028 		return NETDEV_TX_OK;
4029 	}
4030 
4031 	if (!is_s2io_card_up(sp)) {
4032 		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4033 			  dev->name);
4034 		dev_kfree_skb_any(skb);
4035 		return NETDEV_TX_OK;
4036 	}
4037 
4038 	queue = 0;
4039 	if (skb_vlan_tag_present(skb))
4040 		vlan_tag = skb_vlan_tag_get(skb);
4041 	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4042 		if (skb->protocol == htons(ETH_P_IP)) {
4043 			struct iphdr *ip;
4044 			struct tcphdr *th;
4045 			ip = ip_hdr(skb);
4046 
4047 			if (!ip_is_fragment(ip)) {
4048 				th = (struct tcphdr *)(((unsigned char *)ip) +
4049 						       ip->ihl*4);
4050 
4051 				if (ip->protocol == IPPROTO_TCP) {
4052 					queue_len = sp->total_tcp_fifos;
4053 					queue = (ntohs(th->source) +
4054 						 ntohs(th->dest)) &
4055 						sp->fifo_selector[queue_len - 1];
4056 					if (queue >= queue_len)
4057 						queue = queue_len - 1;
4058 				} else if (ip->protocol == IPPROTO_UDP) {
4059 					queue_len = sp->total_udp_fifos;
4060 					queue = (ntohs(th->source) +
4061 						 ntohs(th->dest)) &
4062 						sp->fifo_selector[queue_len - 1];
4063 					if (queue >= queue_len)
4064 						queue = queue_len - 1;
4065 					queue += sp->udp_fifo_idx;
4066 					if (skb->len > 1024)
4067 						enable_per_list_interrupt = 1;
4068 				}
4069 			}
4070 		}
4071 	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4072 		/* get fifo number based on skb->priority value */
4073 		queue = config->fifo_mapping
4074 			[skb->priority & (MAX_TX_FIFOS - 1)];
4075 	fifo = &mac_control->fifos[queue];
4076 
4077 	spin_lock_irqsave(&fifo->tx_lock, flags);
4078 
4079 	if (sp->config.multiq) {
4080 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4081 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4082 			return NETDEV_TX_BUSY;
4083 		}
4084 	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4085 		if (netif_queue_stopped(dev)) {
4086 			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4087 			return NETDEV_TX_BUSY;
4088 		}
4089 	}
4090 
4091 	put_off = (u16)fifo->tx_curr_put_info.offset;
4092 	get_off = (u16)fifo->tx_curr_get_info.offset;
4093 	txdp = fifo->list_info[put_off].list_virt_addr;
4094 
4095 	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4096 	/* Avoid "put" pointer going beyond "get" pointer */
4097 	if (txdp->Host_Control ||
4098 	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4099 		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4100 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4101 		dev_kfree_skb_any(skb);
4102 		spin_unlock_irqrestore(&fifo->tx_lock, flags);
4103 		return NETDEV_TX_OK;
4104 	}
4105 
4106 	offload_type = s2io_offload_type(skb);
4107 	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4108 		txdp->Control_1 |= TXD_TCP_LSO_EN;
4109 		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4110 	}
4111 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
4112 		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4113 				    TXD_TX_CKO_TCP_EN |
4114 				    TXD_TX_CKO_UDP_EN);
4115 	}
4116 	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4117 	txdp->Control_1 |= TXD_LIST_OWN_XENA;
4118 	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4119 	if (enable_per_list_interrupt)
4120 		if (put_off & (queue_len >> 5))
4121 			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4122 	if (vlan_tag) {
4123 		txdp->Control_2 |= TXD_VLAN_ENABLE;
4124 		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4125 	}
4126 
4127 	frg_len = skb_headlen(skb);
4128 	txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4129 					      frg_len, PCI_DMA_TODEVICE);
4130 	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4131 		goto pci_map_failed;
4132 
4133 	txdp->Host_Control = (unsigned long)skb;
4134 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4135 
4136 	frg_cnt = skb_shinfo(skb)->nr_frags;
4137 	/* For fragmented SKB. */
4138 	for (i = 0; i < frg_cnt; i++) {
4139 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4140 		/* A '0' length fragment will be ignored */
4141 		if (!skb_frag_size(frag))
4142 			continue;
4143 		txdp++;
4144 		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4145 							     frag, 0,
4146 							     skb_frag_size(frag),
4147 							     DMA_TO_DEVICE);
4148 		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4149 	}
4150 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4151 
4152 	tx_fifo = mac_control->tx_FIFO_start[queue];
4153 	val64 = fifo->list_info[put_off].list_phy_addr;
4154 	writeq(val64, &tx_fifo->TxDL_Pointer);
4155 
4156 	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4157 		 TX_FIFO_LAST_LIST);
4158 	if (offload_type)
4159 		val64 |= TX_FIFO_SPECIAL_FUNC;
4160 
4161 	writeq(val64, &tx_fifo->List_Control);
4162 
4163 	mmiowb();
4164 
4165 	put_off++;
4166 	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4167 		put_off = 0;
4168 	fifo->tx_curr_put_info.offset = put_off;
4169 
4170 	/* Avoid "put" pointer going beyond "get" pointer */
4171 	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4172 		swstats->fifo_full_cnt++;
4173 		DBG_PRINT(TX_DBG,
4174 			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4175 			  put_off, get_off);
4176 		s2io_stop_tx_queue(sp, fifo->fifo_no);
4177 	}
4178 	swstats->mem_allocated += skb->truesize;
4179 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4180 
4181 	if (sp->config.intr_type == MSI_X)
4182 		tx_intr_handler(fifo);
4183 
4184 	return NETDEV_TX_OK;
4185 
4186 pci_map_failed:
4187 	swstats->pci_map_fail_cnt++;
4188 	s2io_stop_tx_queue(sp, fifo->fifo_no);
4189 	swstats->mem_freed += skb->truesize;
4190 	dev_kfree_skb_any(skb);
4191 	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4192 	return NETDEV_TX_OK;
4193 }
4194 
4195 static void
4196 s2io_alarm_handle(unsigned long data)
4197 {
4198 	struct s2io_nic *sp = (struct s2io_nic *)data;
4199 	struct net_device *dev = sp->dev;
4200 
4201 	s2io_handle_errors(dev);
4202 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4203 }
4204 
4205 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4206 {
4207 	struct ring_info *ring = (struct ring_info *)dev_id;
4208 	struct s2io_nic *sp = ring->nic;
4209 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4210 
4211 	if (unlikely(!is_s2io_card_up(sp)))
4212 		return IRQ_HANDLED;
4213 
4214 	if (sp->config.napi) {
4215 		u8 __iomem *addr = NULL;
4216 		u8 val8 = 0;
4217 
4218 		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4219 		addr += (7 - ring->ring_no);
4220 		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4221 		writeb(val8, addr);
4222 		val8 = readb(addr);
4223 		napi_schedule(&ring->napi);
4224 	} else {
4225 		rx_intr_handler(ring, 0);
4226 		s2io_chk_rx_buffers(sp, ring);
4227 	}
4228 
4229 	return IRQ_HANDLED;
4230 }
4231 
4232 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4233 {
4234 	int i;
4235 	struct fifo_info *fifos = (struct fifo_info *)dev_id;
4236 	struct s2io_nic *sp = fifos->nic;
4237 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4238 	struct config_param *config  = &sp->config;
4239 	u64 reason;
4240 
4241 	if (unlikely(!is_s2io_card_up(sp)))
4242 		return IRQ_NONE;
4243 
4244 	reason = readq(&bar0->general_int_status);
4245 	if (unlikely(reason == S2IO_MINUS_ONE))
4246 		/* Nothing much can be done. Get out */
4247 		return IRQ_HANDLED;
4248 
4249 	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4250 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4251 
4252 		if (reason & GEN_INTR_TXPIC)
4253 			s2io_txpic_intr_handle(sp);
4254 
4255 		if (reason & GEN_INTR_TXTRAFFIC)
4256 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4257 
4258 		for (i = 0; i < config->tx_fifo_num; i++)
4259 			tx_intr_handler(&fifos[i]);
4260 
4261 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4262 		readl(&bar0->general_int_status);
4263 		return IRQ_HANDLED;
4264 	}
4265 	/* The interrupt was not raised by us */
4266 	return IRQ_NONE;
4267 }
4268 
4269 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4270 {
4271 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4272 	u64 val64;
4273 
4274 	val64 = readq(&bar0->pic_int_status);
4275 	if (val64 & PIC_INT_GPIO) {
4276 		val64 = readq(&bar0->gpio_int_reg);
4277 		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4278 		    (val64 & GPIO_INT_REG_LINK_UP)) {
4279 			/*
4280 			 * This is unstable state so clear both up/down
4281 			 * interrupt and adapter to re-evaluate the link state.
4282 			 */
4283 			val64 |= GPIO_INT_REG_LINK_DOWN;
4284 			val64 |= GPIO_INT_REG_LINK_UP;
4285 			writeq(val64, &bar0->gpio_int_reg);
4286 			val64 = readq(&bar0->gpio_int_mask);
4287 			val64 &= ~(GPIO_INT_MASK_LINK_UP |
4288 				   GPIO_INT_MASK_LINK_DOWN);
4289 			writeq(val64, &bar0->gpio_int_mask);
4290 		} else if (val64 & GPIO_INT_REG_LINK_UP) {
4291 			val64 = readq(&bar0->adapter_status);
4292 			/* Enable Adapter */
4293 			val64 = readq(&bar0->adapter_control);
4294 			val64 |= ADAPTER_CNTL_EN;
4295 			writeq(val64, &bar0->adapter_control);
4296 			val64 |= ADAPTER_LED_ON;
4297 			writeq(val64, &bar0->adapter_control);
4298 			if (!sp->device_enabled_once)
4299 				sp->device_enabled_once = 1;
4300 
4301 			s2io_link(sp, LINK_UP);
4302 			/*
4303 			 * unmask link down interrupt and mask link-up
4304 			 * intr
4305 			 */
4306 			val64 = readq(&bar0->gpio_int_mask);
4307 			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4308 			val64 |= GPIO_INT_MASK_LINK_UP;
4309 			writeq(val64, &bar0->gpio_int_mask);
4310 
4311 		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4312 			val64 = readq(&bar0->adapter_status);
4313 			s2io_link(sp, LINK_DOWN);
4314 			/* Link is down so unmaks link up interrupt */
4315 			val64 = readq(&bar0->gpio_int_mask);
4316 			val64 &= ~GPIO_INT_MASK_LINK_UP;
4317 			val64 |= GPIO_INT_MASK_LINK_DOWN;
4318 			writeq(val64, &bar0->gpio_int_mask);
4319 
4320 			/* turn off LED */
4321 			val64 = readq(&bar0->adapter_control);
4322 			val64 = val64 & (~ADAPTER_LED_ON);
4323 			writeq(val64, &bar0->adapter_control);
4324 		}
4325 	}
4326 	val64 = readq(&bar0->gpio_int_mask);
4327 }
4328 
4329 /**
4330  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4331  *  @value: alarm bits
4332  *  @addr: address value
4333  *  @cnt: counter variable
4334  *  Description: Check for alarm and increment the counter
4335  *  Return Value:
4336  *  1 - if alarm bit set
4337  *  0 - if alarm bit is not set
4338  */
4339 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4340 				 unsigned long long *cnt)
4341 {
4342 	u64 val64;
4343 	val64 = readq(addr);
4344 	if (val64 & value) {
4345 		writeq(val64, addr);
4346 		(*cnt)++;
4347 		return 1;
4348 	}
4349 	return 0;
4350 
4351 }
4352 
4353 /**
4354  *  s2io_handle_errors - Xframe error indication handler
4355  *  @nic: device private variable
4356  *  Description: Handle alarms such as loss of link, single or
4357  *  double ECC errors, critical and serious errors.
4358  *  Return Value:
4359  *  NONE
4360  */
4361 static void s2io_handle_errors(void *dev_id)
4362 {
4363 	struct net_device *dev = (struct net_device *)dev_id;
4364 	struct s2io_nic *sp = netdev_priv(dev);
4365 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4366 	u64 temp64 = 0, val64 = 0;
4367 	int i = 0;
4368 
4369 	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4370 	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4371 
4372 	if (!is_s2io_card_up(sp))
4373 		return;
4374 
4375 	if (pci_channel_offline(sp->pdev))
4376 		return;
4377 
4378 	memset(&sw_stat->ring_full_cnt, 0,
4379 	       sizeof(sw_stat->ring_full_cnt));
4380 
4381 	/* Handling the XPAK counters update */
4382 	if (stats->xpak_timer_count < 72000) {
4383 		/* waiting for an hour */
4384 		stats->xpak_timer_count++;
4385 	} else {
4386 		s2io_updt_xpak_counter(dev);
4387 		/* reset the count to zero */
4388 		stats->xpak_timer_count = 0;
4389 	}
4390 
4391 	/* Handling link status change error Intr */
4392 	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4393 		val64 = readq(&bar0->mac_rmac_err_reg);
4394 		writeq(val64, &bar0->mac_rmac_err_reg);
4395 		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4396 			schedule_work(&sp->set_link_task);
4397 	}
4398 
4399 	/* In case of a serious error, the device will be Reset. */
4400 	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4401 				  &sw_stat->serious_err_cnt))
4402 		goto reset;
4403 
4404 	/* Check for data parity error */
4405 	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4406 				  &sw_stat->parity_err_cnt))
4407 		goto reset;
4408 
4409 	/* Check for ring full counter */
4410 	if (sp->device_type == XFRAME_II_DEVICE) {
4411 		val64 = readq(&bar0->ring_bump_counter1);
4412 		for (i = 0; i < 4; i++) {
4413 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4414 			temp64 >>= 64 - ((i+1)*16);
4415 			sw_stat->ring_full_cnt[i] += temp64;
4416 		}
4417 
4418 		val64 = readq(&bar0->ring_bump_counter2);
4419 		for (i = 0; i < 4; i++) {
4420 			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4421 			temp64 >>= 64 - ((i+1)*16);
4422 			sw_stat->ring_full_cnt[i+4] += temp64;
4423 		}
4424 	}
4425 
4426 	val64 = readq(&bar0->txdma_int_status);
4427 	/*check for pfc_err*/
4428 	if (val64 & TXDMA_PFC_INT) {
4429 		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4430 					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4431 					  PFC_PCIX_ERR,
4432 					  &bar0->pfc_err_reg,
4433 					  &sw_stat->pfc_err_cnt))
4434 			goto reset;
4435 		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4436 				      &bar0->pfc_err_reg,
4437 				      &sw_stat->pfc_err_cnt);
4438 	}
4439 
4440 	/*check for tda_err*/
4441 	if (val64 & TXDMA_TDA_INT) {
4442 		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4443 					  TDA_SM0_ERR_ALARM |
4444 					  TDA_SM1_ERR_ALARM,
4445 					  &bar0->tda_err_reg,
4446 					  &sw_stat->tda_err_cnt))
4447 			goto reset;
4448 		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4449 				      &bar0->tda_err_reg,
4450 				      &sw_stat->tda_err_cnt);
4451 	}
4452 	/*check for pcc_err*/
4453 	if (val64 & TXDMA_PCC_INT) {
4454 		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4455 					  PCC_N_SERR | PCC_6_COF_OV_ERR |
4456 					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4457 					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4458 					  PCC_TXB_ECC_DB_ERR,
4459 					  &bar0->pcc_err_reg,
4460 					  &sw_stat->pcc_err_cnt))
4461 			goto reset;
4462 		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4463 				      &bar0->pcc_err_reg,
4464 				      &sw_stat->pcc_err_cnt);
4465 	}
4466 
4467 	/*check for tti_err*/
4468 	if (val64 & TXDMA_TTI_INT) {
4469 		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4470 					  &bar0->tti_err_reg,
4471 					  &sw_stat->tti_err_cnt))
4472 			goto reset;
4473 		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4474 				      &bar0->tti_err_reg,
4475 				      &sw_stat->tti_err_cnt);
4476 	}
4477 
4478 	/*check for lso_err*/
4479 	if (val64 & TXDMA_LSO_INT) {
4480 		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4481 					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4482 					  &bar0->lso_err_reg,
4483 					  &sw_stat->lso_err_cnt))
4484 			goto reset;
4485 		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4486 				      &bar0->lso_err_reg,
4487 				      &sw_stat->lso_err_cnt);
4488 	}
4489 
4490 	/*check for tpa_err*/
4491 	if (val64 & TXDMA_TPA_INT) {
4492 		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4493 					  &bar0->tpa_err_reg,
4494 					  &sw_stat->tpa_err_cnt))
4495 			goto reset;
4496 		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4497 				      &bar0->tpa_err_reg,
4498 				      &sw_stat->tpa_err_cnt);
4499 	}
4500 
4501 	/*check for sm_err*/
4502 	if (val64 & TXDMA_SM_INT) {
4503 		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4504 					  &bar0->sm_err_reg,
4505 					  &sw_stat->sm_err_cnt))
4506 			goto reset;
4507 	}
4508 
4509 	val64 = readq(&bar0->mac_int_status);
4510 	if (val64 & MAC_INT_STATUS_TMAC_INT) {
4511 		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4512 					  &bar0->mac_tmac_err_reg,
4513 					  &sw_stat->mac_tmac_err_cnt))
4514 			goto reset;
4515 		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4516 				      TMAC_DESC_ECC_SG_ERR |
4517 				      TMAC_DESC_ECC_DB_ERR,
4518 				      &bar0->mac_tmac_err_reg,
4519 				      &sw_stat->mac_tmac_err_cnt);
4520 	}
4521 
4522 	val64 = readq(&bar0->xgxs_int_status);
4523 	if (val64 & XGXS_INT_STATUS_TXGXS) {
4524 		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4525 					  &bar0->xgxs_txgxs_err_reg,
4526 					  &sw_stat->xgxs_txgxs_err_cnt))
4527 			goto reset;
4528 		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4529 				      &bar0->xgxs_txgxs_err_reg,
4530 				      &sw_stat->xgxs_txgxs_err_cnt);
4531 	}
4532 
4533 	val64 = readq(&bar0->rxdma_int_status);
4534 	if (val64 & RXDMA_INT_RC_INT_M) {
4535 		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4536 					  RC_FTC_ECC_DB_ERR |
4537 					  RC_PRCn_SM_ERR_ALARM |
4538 					  RC_FTC_SM_ERR_ALARM,
4539 					  &bar0->rc_err_reg,
4540 					  &sw_stat->rc_err_cnt))
4541 			goto reset;
4542 		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4543 				      RC_FTC_ECC_SG_ERR |
4544 				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4545 				      &sw_stat->rc_err_cnt);
4546 		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4547 					  PRC_PCI_AB_WR_Rn |
4548 					  PRC_PCI_AB_F_WR_Rn,
4549 					  &bar0->prc_pcix_err_reg,
4550 					  &sw_stat->prc_pcix_err_cnt))
4551 			goto reset;
4552 		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4553 				      PRC_PCI_DP_WR_Rn |
4554 				      PRC_PCI_DP_F_WR_Rn,
4555 				      &bar0->prc_pcix_err_reg,
4556 				      &sw_stat->prc_pcix_err_cnt);
4557 	}
4558 
4559 	if (val64 & RXDMA_INT_RPA_INT_M) {
4560 		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4561 					  &bar0->rpa_err_reg,
4562 					  &sw_stat->rpa_err_cnt))
4563 			goto reset;
4564 		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4565 				      &bar0->rpa_err_reg,
4566 				      &sw_stat->rpa_err_cnt);
4567 	}
4568 
4569 	if (val64 & RXDMA_INT_RDA_INT_M) {
4570 		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4571 					  RDA_FRM_ECC_DB_N_AERR |
4572 					  RDA_SM1_ERR_ALARM |
4573 					  RDA_SM0_ERR_ALARM |
4574 					  RDA_RXD_ECC_DB_SERR,
4575 					  &bar0->rda_err_reg,
4576 					  &sw_stat->rda_err_cnt))
4577 			goto reset;
4578 		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4579 				      RDA_FRM_ECC_SG_ERR |
4580 				      RDA_MISC_ERR |
4581 				      RDA_PCIX_ERR,
4582 				      &bar0->rda_err_reg,
4583 				      &sw_stat->rda_err_cnt);
4584 	}
4585 
4586 	if (val64 & RXDMA_INT_RTI_INT_M) {
4587 		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4588 					  &bar0->rti_err_reg,
4589 					  &sw_stat->rti_err_cnt))
4590 			goto reset;
4591 		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4592 				      &bar0->rti_err_reg,
4593 				      &sw_stat->rti_err_cnt);
4594 	}
4595 
4596 	val64 = readq(&bar0->mac_int_status);
4597 	if (val64 & MAC_INT_STATUS_RMAC_INT) {
4598 		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4599 					  &bar0->mac_rmac_err_reg,
4600 					  &sw_stat->mac_rmac_err_cnt))
4601 			goto reset;
4602 		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4603 				      RMAC_SINGLE_ECC_ERR |
4604 				      RMAC_DOUBLE_ECC_ERR,
4605 				      &bar0->mac_rmac_err_reg,
4606 				      &sw_stat->mac_rmac_err_cnt);
4607 	}
4608 
4609 	val64 = readq(&bar0->xgxs_int_status);
4610 	if (val64 & XGXS_INT_STATUS_RXGXS) {
4611 		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4612 					  &bar0->xgxs_rxgxs_err_reg,
4613 					  &sw_stat->xgxs_rxgxs_err_cnt))
4614 			goto reset;
4615 	}
4616 
4617 	val64 = readq(&bar0->mc_int_status);
4618 	if (val64 & MC_INT_STATUS_MC_INT) {
4619 		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4620 					  &bar0->mc_err_reg,
4621 					  &sw_stat->mc_err_cnt))
4622 			goto reset;
4623 
4624 		/* Handling Ecc errors */
4625 		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4626 			writeq(val64, &bar0->mc_err_reg);
4627 			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4628 				sw_stat->double_ecc_errs++;
4629 				if (sp->device_type != XFRAME_II_DEVICE) {
4630 					/*
4631 					 * Reset XframeI only if critical error
4632 					 */
4633 					if (val64 &
4634 					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4635 					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4636 						goto reset;
4637 				}
4638 			} else
4639 				sw_stat->single_ecc_errs++;
4640 		}
4641 	}
4642 	return;
4643 
4644 reset:
4645 	s2io_stop_all_tx_queue(sp);
4646 	schedule_work(&sp->rst_timer_task);
4647 	sw_stat->soft_reset_cnt++;
4648 }
4649 
4650 /**
4651  *  s2io_isr - ISR handler of the device .
4652  *  @irq: the irq of the device.
4653  *  @dev_id: a void pointer to the dev structure of the NIC.
4654  *  Description:  This function is the ISR handler of the device. It
4655  *  identifies the reason for the interrupt and calls the relevant
4656  *  service routines. As a contongency measure, this ISR allocates the
4657  *  recv buffers, if their numbers are below the panic value which is
4658  *  presently set to 25% of the original number of rcv buffers allocated.
4659  *  Return value:
4660  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4661  *   IRQ_NONE: will be returned if interrupt is not from our device
4662  */
4663 static irqreturn_t s2io_isr(int irq, void *dev_id)
4664 {
4665 	struct net_device *dev = (struct net_device *)dev_id;
4666 	struct s2io_nic *sp = netdev_priv(dev);
4667 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4668 	int i;
4669 	u64 reason = 0;
4670 	struct mac_info *mac_control;
4671 	struct config_param *config;
4672 
4673 	/* Pretend we handled any irq's from a disconnected card */
4674 	if (pci_channel_offline(sp->pdev))
4675 		return IRQ_NONE;
4676 
4677 	if (!is_s2io_card_up(sp))
4678 		return IRQ_NONE;
4679 
4680 	config = &sp->config;
4681 	mac_control = &sp->mac_control;
4682 
4683 	/*
4684 	 * Identify the cause for interrupt and call the appropriate
4685 	 * interrupt handler. Causes for the interrupt could be;
4686 	 * 1. Rx of packet.
4687 	 * 2. Tx complete.
4688 	 * 3. Link down.
4689 	 */
4690 	reason = readq(&bar0->general_int_status);
4691 
4692 	if (unlikely(reason == S2IO_MINUS_ONE))
4693 		return IRQ_HANDLED;	/* Nothing much can be done. Get out */
4694 
4695 	if (reason &
4696 	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4697 		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4698 
4699 		if (config->napi) {
4700 			if (reason & GEN_INTR_RXTRAFFIC) {
4701 				napi_schedule(&sp->napi);
4702 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4703 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4704 				readl(&bar0->rx_traffic_int);
4705 			}
4706 		} else {
4707 			/*
4708 			 * rx_traffic_int reg is an R1 register, writing all 1's
4709 			 * will ensure that the actual interrupt causing bit
4710 			 * get's cleared and hence a read can be avoided.
4711 			 */
4712 			if (reason & GEN_INTR_RXTRAFFIC)
4713 				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4714 
4715 			for (i = 0; i < config->rx_ring_num; i++) {
4716 				struct ring_info *ring = &mac_control->rings[i];
4717 
4718 				rx_intr_handler(ring, 0);
4719 			}
4720 		}
4721 
4722 		/*
4723 		 * tx_traffic_int reg is an R1 register, writing all 1's
4724 		 * will ensure that the actual interrupt causing bit get's
4725 		 * cleared and hence a read can be avoided.
4726 		 */
4727 		if (reason & GEN_INTR_TXTRAFFIC)
4728 			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4729 
4730 		for (i = 0; i < config->tx_fifo_num; i++)
4731 			tx_intr_handler(&mac_control->fifos[i]);
4732 
4733 		if (reason & GEN_INTR_TXPIC)
4734 			s2io_txpic_intr_handle(sp);
4735 
4736 		/*
4737 		 * Reallocate the buffers from the interrupt handler itself.
4738 		 */
4739 		if (!config->napi) {
4740 			for (i = 0; i < config->rx_ring_num; i++) {
4741 				struct ring_info *ring = &mac_control->rings[i];
4742 
4743 				s2io_chk_rx_buffers(sp, ring);
4744 			}
4745 		}
4746 		writeq(sp->general_int_mask, &bar0->general_int_mask);
4747 		readl(&bar0->general_int_status);
4748 
4749 		return IRQ_HANDLED;
4750 
4751 	} else if (!reason) {
4752 		/* The interrupt was not raised by us */
4753 		return IRQ_NONE;
4754 	}
4755 
4756 	return IRQ_HANDLED;
4757 }
4758 
4759 /**
4760  * s2io_updt_stats -
4761  */
4762 static void s2io_updt_stats(struct s2io_nic *sp)
4763 {
4764 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4765 	u64 val64;
4766 	int cnt = 0;
4767 
4768 	if (is_s2io_card_up(sp)) {
4769 		/* Apprx 30us on a 133 MHz bus */
4770 		val64 = SET_UPDT_CLICKS(10) |
4771 			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4772 		writeq(val64, &bar0->stat_cfg);
4773 		do {
4774 			udelay(100);
4775 			val64 = readq(&bar0->stat_cfg);
4776 			if (!(val64 & s2BIT(0)))
4777 				break;
4778 			cnt++;
4779 			if (cnt == 5)
4780 				break; /* Updt failed */
4781 		} while (1);
4782 	}
4783 }
4784 
4785 /**
4786  *  s2io_get_stats - Updates the device statistics structure.
4787  *  @dev : pointer to the device structure.
4788  *  Description:
4789  *  This function updates the device statistics structure in the s2io_nic
4790  *  structure and returns a pointer to the same.
4791  *  Return value:
4792  *  pointer to the updated net_device_stats structure.
4793  */
4794 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4795 {
4796 	struct s2io_nic *sp = netdev_priv(dev);
4797 	struct mac_info *mac_control = &sp->mac_control;
4798 	struct stat_block *stats = mac_control->stats_info;
4799 	u64 delta;
4800 
4801 	/* Configure Stats for immediate updt */
4802 	s2io_updt_stats(sp);
4803 
4804 	/* A device reset will cause the on-adapter statistics to be zero'ed.
4805 	 * This can be done while running by changing the MTU.  To prevent the
4806 	 * system from having the stats zero'ed, the driver keeps a copy of the
4807 	 * last update to the system (which is also zero'ed on reset).  This
4808 	 * enables the driver to accurately know the delta between the last
4809 	 * update and the current update.
4810 	 */
4811 	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4812 		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4813 	sp->stats.rx_packets += delta;
4814 	dev->stats.rx_packets += delta;
4815 
4816 	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4817 		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4818 	sp->stats.tx_packets += delta;
4819 	dev->stats.tx_packets += delta;
4820 
4821 	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4822 		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4823 	sp->stats.rx_bytes += delta;
4824 	dev->stats.rx_bytes += delta;
4825 
4826 	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4827 		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4828 	sp->stats.tx_bytes += delta;
4829 	dev->stats.tx_bytes += delta;
4830 
4831 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4832 	sp->stats.rx_errors += delta;
4833 	dev->stats.rx_errors += delta;
4834 
4835 	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4836 		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4837 	sp->stats.tx_errors += delta;
4838 	dev->stats.tx_errors += delta;
4839 
4840 	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4841 	sp->stats.rx_dropped += delta;
4842 	dev->stats.rx_dropped += delta;
4843 
4844 	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4845 	sp->stats.tx_dropped += delta;
4846 	dev->stats.tx_dropped += delta;
4847 
4848 	/* The adapter MAC interprets pause frames as multicast packets, but
4849 	 * does not pass them up.  This erroneously increases the multicast
4850 	 * packet count and needs to be deducted when the multicast frame count
4851 	 * is queried.
4852 	 */
4853 	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4854 		le32_to_cpu(stats->rmac_vld_mcst_frms);
4855 	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4856 	delta -= sp->stats.multicast;
4857 	sp->stats.multicast += delta;
4858 	dev->stats.multicast += delta;
4859 
4860 	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4861 		le32_to_cpu(stats->rmac_usized_frms)) +
4862 		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4863 	sp->stats.rx_length_errors += delta;
4864 	dev->stats.rx_length_errors += delta;
4865 
4866 	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4867 	sp->stats.rx_crc_errors += delta;
4868 	dev->stats.rx_crc_errors += delta;
4869 
4870 	return &dev->stats;
4871 }
4872 
4873 /**
4874  *  s2io_set_multicast - entry point for multicast address enable/disable.
4875  *  @dev : pointer to the device structure
4876  *  Description:
4877  *  This function is a driver entry point which gets called by the kernel
4878  *  whenever multicast addresses must be enabled/disabled. This also gets
4879  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4880  *  determine, if multicast address must be enabled or if promiscuous mode
4881  *  is to be disabled etc.
4882  *  Return value:
4883  *  void.
4884  */
4885 
4886 static void s2io_set_multicast(struct net_device *dev)
4887 {
4888 	int i, j, prev_cnt;
4889 	struct netdev_hw_addr *ha;
4890 	struct s2io_nic *sp = netdev_priv(dev);
4891 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4892 	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4893 		0xfeffffffffffULL;
4894 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4895 	void __iomem *add;
4896 	struct config_param *config = &sp->config;
4897 
4898 	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4899 		/*  Enable all Multicast addresses */
4900 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4901 		       &bar0->rmac_addr_data0_mem);
4902 		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4903 		       &bar0->rmac_addr_data1_mem);
4904 		val64 = RMAC_ADDR_CMD_MEM_WE |
4905 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4906 			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4907 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4908 		/* Wait till command completes */
4909 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4910 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4911 				      S2IO_BIT_RESET);
4912 
4913 		sp->m_cast_flg = 1;
4914 		sp->all_multi_pos = config->max_mc_addr - 1;
4915 	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4916 		/*  Disable all Multicast addresses */
4917 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4918 		       &bar0->rmac_addr_data0_mem);
4919 		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4920 		       &bar0->rmac_addr_data1_mem);
4921 		val64 = RMAC_ADDR_CMD_MEM_WE |
4922 			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4923 			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4924 		writeq(val64, &bar0->rmac_addr_cmd_mem);
4925 		/* Wait till command completes */
4926 		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4927 				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4928 				      S2IO_BIT_RESET);
4929 
4930 		sp->m_cast_flg = 0;
4931 		sp->all_multi_pos = 0;
4932 	}
4933 
4934 	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4935 		/*  Put the NIC into promiscuous mode */
4936 		add = &bar0->mac_cfg;
4937 		val64 = readq(&bar0->mac_cfg);
4938 		val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4939 
4940 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4941 		writel((u32)val64, add);
4942 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4943 		writel((u32) (val64 >> 32), (add + 4));
4944 
4945 		if (vlan_tag_strip != 1) {
4946 			val64 = readq(&bar0->rx_pa_cfg);
4947 			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4948 			writeq(val64, &bar0->rx_pa_cfg);
4949 			sp->vlan_strip_flag = 0;
4950 		}
4951 
4952 		val64 = readq(&bar0->mac_cfg);
4953 		sp->promisc_flg = 1;
4954 		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4955 			  dev->name);
4956 	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4957 		/*  Remove the NIC from promiscuous mode */
4958 		add = &bar0->mac_cfg;
4959 		val64 = readq(&bar0->mac_cfg);
4960 		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4961 
4962 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4963 		writel((u32)val64, add);
4964 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4965 		writel((u32) (val64 >> 32), (add + 4));
4966 
4967 		if (vlan_tag_strip != 0) {
4968 			val64 = readq(&bar0->rx_pa_cfg);
4969 			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4970 			writeq(val64, &bar0->rx_pa_cfg);
4971 			sp->vlan_strip_flag = 1;
4972 		}
4973 
4974 		val64 = readq(&bar0->mac_cfg);
4975 		sp->promisc_flg = 0;
4976 		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
4977 	}
4978 
4979 	/*  Update individual M_CAST address list */
4980 	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
4981 		if (netdev_mc_count(dev) >
4982 		    (config->max_mc_addr - config->max_mac_addr)) {
4983 			DBG_PRINT(ERR_DBG,
4984 				  "%s: No more Rx filters can be added - "
4985 				  "please enable ALL_MULTI instead\n",
4986 				  dev->name);
4987 			return;
4988 		}
4989 
4990 		prev_cnt = sp->mc_addr_count;
4991 		sp->mc_addr_count = netdev_mc_count(dev);
4992 
4993 		/* Clear out the previous list of Mc in the H/W. */
4994 		for (i = 0; i < prev_cnt; i++) {
4995 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4996 			       &bar0->rmac_addr_data0_mem);
4997 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4998 			       &bar0->rmac_addr_data1_mem);
4999 			val64 = RMAC_ADDR_CMD_MEM_WE |
5000 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5001 				RMAC_ADDR_CMD_MEM_OFFSET
5002 				(config->mc_start_offset + i);
5003 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5004 
5005 			/* Wait for command completes */
5006 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5007 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5008 						  S2IO_BIT_RESET)) {
5009 				DBG_PRINT(ERR_DBG,
5010 					  "%s: Adding Multicasts failed\n",
5011 					  dev->name);
5012 				return;
5013 			}
5014 		}
5015 
5016 		/* Create the new Rx filter list and update the same in H/W. */
5017 		i = 0;
5018 		netdev_for_each_mc_addr(ha, dev) {
5019 			mac_addr = 0;
5020 			for (j = 0; j < ETH_ALEN; j++) {
5021 				mac_addr |= ha->addr[j];
5022 				mac_addr <<= 8;
5023 			}
5024 			mac_addr >>= 8;
5025 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5026 			       &bar0->rmac_addr_data0_mem);
5027 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5028 			       &bar0->rmac_addr_data1_mem);
5029 			val64 = RMAC_ADDR_CMD_MEM_WE |
5030 				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5031 				RMAC_ADDR_CMD_MEM_OFFSET
5032 				(i + config->mc_start_offset);
5033 			writeq(val64, &bar0->rmac_addr_cmd_mem);
5034 
5035 			/* Wait for command completes */
5036 			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5037 						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5038 						  S2IO_BIT_RESET)) {
5039 				DBG_PRINT(ERR_DBG,
5040 					  "%s: Adding Multicasts failed\n",
5041 					  dev->name);
5042 				return;
5043 			}
5044 			i++;
5045 		}
5046 	}
5047 }
5048 
5049 /* read from CAM unicast & multicast addresses and store it in
5050  * def_mac_addr structure
5051  */
5052 static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5053 {
5054 	int offset;
5055 	u64 mac_addr = 0x0;
5056 	struct config_param *config = &sp->config;
5057 
5058 	/* store unicast & multicast mac addresses */
5059 	for (offset = 0; offset < config->max_mc_addr; offset++) {
5060 		mac_addr = do_s2io_read_unicast_mc(sp, offset);
5061 		/* if read fails disable the entry */
5062 		if (mac_addr == FAILURE)
5063 			mac_addr = S2IO_DISABLE_MAC_ENTRY;
5064 		do_s2io_copy_mac_addr(sp, offset, mac_addr);
5065 	}
5066 }
5067 
5068 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5069 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5070 {
5071 	int offset;
5072 	struct config_param *config = &sp->config;
5073 	/* restore unicast mac address */
5074 	for (offset = 0; offset < config->max_mac_addr; offset++)
5075 		do_s2io_prog_unicast(sp->dev,
5076 				     sp->def_mac_addr[offset].mac_addr);
5077 
5078 	/* restore multicast mac address */
5079 	for (offset = config->mc_start_offset;
5080 	     offset < config->max_mc_addr; offset++)
5081 		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5082 }
5083 
5084 /* add a multicast MAC address to CAM */
5085 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5086 {
5087 	int i;
5088 	u64 mac_addr = 0;
5089 	struct config_param *config = &sp->config;
5090 
5091 	for (i = 0; i < ETH_ALEN; i++) {
5092 		mac_addr <<= 8;
5093 		mac_addr |= addr[i];
5094 	}
5095 	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5096 		return SUCCESS;
5097 
5098 	/* check if the multicast mac already preset in CAM */
5099 	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5100 		u64 tmp64;
5101 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5102 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5103 			break;
5104 
5105 		if (tmp64 == mac_addr)
5106 			return SUCCESS;
5107 	}
5108 	if (i == config->max_mc_addr) {
5109 		DBG_PRINT(ERR_DBG,
5110 			  "CAM full no space left for multicast MAC\n");
5111 		return FAILURE;
5112 	}
5113 	/* Update the internal structure with this new mac address */
5114 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5115 
5116 	return do_s2io_add_mac(sp, mac_addr, i);
5117 }
5118 
5119 /* add MAC address to CAM */
5120 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5121 {
5122 	u64 val64;
5123 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5124 
5125 	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5126 	       &bar0->rmac_addr_data0_mem);
5127 
5128 	val64 =	RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5129 		RMAC_ADDR_CMD_MEM_OFFSET(off);
5130 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5131 
5132 	/* Wait till command completes */
5133 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5134 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5135 				  S2IO_BIT_RESET)) {
5136 		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5137 		return FAILURE;
5138 	}
5139 	return SUCCESS;
5140 }
5141 /* deletes a specified unicast/multicast mac entry from CAM */
5142 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5143 {
5144 	int offset;
5145 	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5146 	struct config_param *config = &sp->config;
5147 
5148 	for (offset = 1;
5149 	     offset < config->max_mc_addr; offset++) {
5150 		tmp64 = do_s2io_read_unicast_mc(sp, offset);
5151 		if (tmp64 == addr) {
5152 			/* disable the entry by writing  0xffffffffffffULL */
5153 			if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5154 				return FAILURE;
5155 			/* store the new mac list from CAM */
5156 			do_s2io_store_unicast_mc(sp);
5157 			return SUCCESS;
5158 		}
5159 	}
5160 	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5161 		  (unsigned long long)addr);
5162 	return FAILURE;
5163 }
5164 
5165 /* read mac entries from CAM */
5166 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5167 {
5168 	u64 tmp64 = 0xffffffffffff0000ULL, val64;
5169 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5170 
5171 	/* read mac addr */
5172 	val64 =	RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5173 		RMAC_ADDR_CMD_MEM_OFFSET(offset);
5174 	writeq(val64, &bar0->rmac_addr_cmd_mem);
5175 
5176 	/* Wait till command completes */
5177 	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5178 				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5179 				  S2IO_BIT_RESET)) {
5180 		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5181 		return FAILURE;
5182 	}
5183 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
5184 
5185 	return tmp64 >> 16;
5186 }
5187 
5188 /**
5189  * s2io_set_mac_addr - driver entry point
5190  */
5191 
5192 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5193 {
5194 	struct sockaddr *addr = p;
5195 
5196 	if (!is_valid_ether_addr(addr->sa_data))
5197 		return -EADDRNOTAVAIL;
5198 
5199 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5200 
5201 	/* store the MAC address in CAM */
5202 	return do_s2io_prog_unicast(dev, dev->dev_addr);
5203 }
5204 /**
5205  *  do_s2io_prog_unicast - Programs the Xframe mac address
5206  *  @dev : pointer to the device structure.
5207  *  @addr: a uchar pointer to the new mac address which is to be set.
5208  *  Description : This procedure will program the Xframe to receive
5209  *  frames with new Mac Address
5210  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5211  *  as defined in errno.h file on failure.
5212  */
5213 
5214 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5215 {
5216 	struct s2io_nic *sp = netdev_priv(dev);
5217 	register u64 mac_addr = 0, perm_addr = 0;
5218 	int i;
5219 	u64 tmp64;
5220 	struct config_param *config = &sp->config;
5221 
5222 	/*
5223 	 * Set the new MAC address as the new unicast filter and reflect this
5224 	 * change on the device address registered with the OS. It will be
5225 	 * at offset 0.
5226 	 */
5227 	for (i = 0; i < ETH_ALEN; i++) {
5228 		mac_addr <<= 8;
5229 		mac_addr |= addr[i];
5230 		perm_addr <<= 8;
5231 		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5232 	}
5233 
5234 	/* check if the dev_addr is different than perm_addr */
5235 	if (mac_addr == perm_addr)
5236 		return SUCCESS;
5237 
5238 	/* check if the mac already preset in CAM */
5239 	for (i = 1; i < config->max_mac_addr; i++) {
5240 		tmp64 = do_s2io_read_unicast_mc(sp, i);
5241 		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5242 			break;
5243 
5244 		if (tmp64 == mac_addr) {
5245 			DBG_PRINT(INFO_DBG,
5246 				  "MAC addr:0x%llx already present in CAM\n",
5247 				  (unsigned long long)mac_addr);
5248 			return SUCCESS;
5249 		}
5250 	}
5251 	if (i == config->max_mac_addr) {
5252 		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5253 		return FAILURE;
5254 	}
5255 	/* Update the internal structure with this new mac address */
5256 	do_s2io_copy_mac_addr(sp, i, mac_addr);
5257 
5258 	return do_s2io_add_mac(sp, mac_addr, i);
5259 }
5260 
5261 /**
5262  * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5263  * @sp : private member of the device structure, which is a pointer to the
5264  * s2io_nic structure.
5265  * @cmd: pointer to the structure with parameters given by ethtool to set
5266  * link information.
5267  * Description:
5268  * The function sets different link parameters provided by the user onto
5269  * the NIC.
5270  * Return value:
5271  * 0 on success.
5272  */
5273 
5274 static int
5275 s2io_ethtool_set_link_ksettings(struct net_device *dev,
5276 				const struct ethtool_link_ksettings *cmd)
5277 {
5278 	struct s2io_nic *sp = netdev_priv(dev);
5279 	if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5280 	    (cmd->base.speed != SPEED_10000) ||
5281 	    (cmd->base.duplex != DUPLEX_FULL))
5282 		return -EINVAL;
5283 	else {
5284 		s2io_close(sp->dev);
5285 		s2io_open(sp->dev);
5286 	}
5287 
5288 	return 0;
5289 }
5290 
5291 /**
5292  * s2io_ethtol_get_link_ksettings - Return link specific information.
5293  * @sp : private member of the device structure, pointer to the
5294  *      s2io_nic structure.
5295  * @cmd : pointer to the structure with parameters given by ethtool
5296  * to return link information.
5297  * Description:
5298  * Returns link specific information like speed, duplex etc.. to ethtool.
5299  * Return value :
5300  * return 0 on success.
5301  */
5302 
5303 static int
5304 s2io_ethtool_get_link_ksettings(struct net_device *dev,
5305 				struct ethtool_link_ksettings *cmd)
5306 {
5307 	struct s2io_nic *sp = netdev_priv(dev);
5308 
5309 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
5310 	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5311 	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5312 
5313 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5314 	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5315 	ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5316 
5317 	cmd->base.port = PORT_FIBRE;
5318 
5319 	if (netif_carrier_ok(sp->dev)) {
5320 		cmd->base.speed = SPEED_10000;
5321 		cmd->base.duplex = DUPLEX_FULL;
5322 	} else {
5323 		cmd->base.speed = SPEED_UNKNOWN;
5324 		cmd->base.duplex = DUPLEX_UNKNOWN;
5325 	}
5326 
5327 	cmd->base.autoneg = AUTONEG_DISABLE;
5328 	return 0;
5329 }
5330 
5331 /**
5332  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5333  * @sp : private member of the device structure, which is a pointer to the
5334  * s2io_nic structure.
5335  * @info : pointer to the structure with parameters given by ethtool to
5336  * return driver information.
5337  * Description:
5338  * Returns driver specefic information like name, version etc.. to ethtool.
5339  * Return value:
5340  *  void
5341  */
5342 
5343 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5344 				  struct ethtool_drvinfo *info)
5345 {
5346 	struct s2io_nic *sp = netdev_priv(dev);
5347 
5348 	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5349 	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
5350 	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5351 }
5352 
5353 /**
5354  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5355  *  @sp: private member of the device structure, which is a pointer to the
5356  *  s2io_nic structure.
5357  *  @regs : pointer to the structure with parameters given by ethtool for
5358  *  dumping the registers.
5359  *  @reg_space: The input argument into which all the registers are dumped.
5360  *  Description:
5361  *  Dumps the entire register space of xFrame NIC into the user given
5362  *  buffer area.
5363  * Return value :
5364  * void .
5365  */
5366 
5367 static void s2io_ethtool_gregs(struct net_device *dev,
5368 			       struct ethtool_regs *regs, void *space)
5369 {
5370 	int i;
5371 	u64 reg;
5372 	u8 *reg_space = (u8 *)space;
5373 	struct s2io_nic *sp = netdev_priv(dev);
5374 
5375 	regs->len = XENA_REG_SPACE;
5376 	regs->version = sp->pdev->subsystem_device;
5377 
5378 	for (i = 0; i < regs->len; i += 8) {
5379 		reg = readq(sp->bar0 + i);
5380 		memcpy((reg_space + i), &reg, 8);
5381 	}
5382 }
5383 
5384 /*
5385  *  s2io_set_led - control NIC led
5386  */
5387 static void s2io_set_led(struct s2io_nic *sp, bool on)
5388 {
5389 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5390 	u16 subid = sp->pdev->subsystem_device;
5391 	u64 val64;
5392 
5393 	if ((sp->device_type == XFRAME_II_DEVICE) ||
5394 	    ((subid & 0xFF) >= 0x07)) {
5395 		val64 = readq(&bar0->gpio_control);
5396 		if (on)
5397 			val64 |= GPIO_CTRL_GPIO_0;
5398 		else
5399 			val64 &= ~GPIO_CTRL_GPIO_0;
5400 
5401 		writeq(val64, &bar0->gpio_control);
5402 	} else {
5403 		val64 = readq(&bar0->adapter_control);
5404 		if (on)
5405 			val64 |= ADAPTER_LED_ON;
5406 		else
5407 			val64 &= ~ADAPTER_LED_ON;
5408 
5409 		writeq(val64, &bar0->adapter_control);
5410 	}
5411 
5412 }
5413 
5414 /**
5415  * s2io_ethtool_set_led - To physically identify the nic on the system.
5416  * @dev : network device
5417  * @state: led setting
5418  *
5419  * Description: Used to physically identify the NIC on the system.
5420  * The Link LED will blink for a time specified by the user for
5421  * identification.
5422  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5423  * identification is possible only if it's link is up.
5424  */
5425 
5426 static int s2io_ethtool_set_led(struct net_device *dev,
5427 				enum ethtool_phys_id_state state)
5428 {
5429 	struct s2io_nic *sp = netdev_priv(dev);
5430 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5431 	u16 subid = sp->pdev->subsystem_device;
5432 
5433 	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5434 		u64 val64 = readq(&bar0->adapter_control);
5435 		if (!(val64 & ADAPTER_CNTL_EN)) {
5436 			pr_err("Adapter Link down, cannot blink LED\n");
5437 			return -EAGAIN;
5438 		}
5439 	}
5440 
5441 	switch (state) {
5442 	case ETHTOOL_ID_ACTIVE:
5443 		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5444 		return 1;	/* cycle on/off once per second */
5445 
5446 	case ETHTOOL_ID_ON:
5447 		s2io_set_led(sp, true);
5448 		break;
5449 
5450 	case ETHTOOL_ID_OFF:
5451 		s2io_set_led(sp, false);
5452 		break;
5453 
5454 	case ETHTOOL_ID_INACTIVE:
5455 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5456 			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5457 	}
5458 
5459 	return 0;
5460 }
5461 
5462 static void s2io_ethtool_gringparam(struct net_device *dev,
5463 				    struct ethtool_ringparam *ering)
5464 {
5465 	struct s2io_nic *sp = netdev_priv(dev);
5466 	int i, tx_desc_count = 0, rx_desc_count = 0;
5467 
5468 	if (sp->rxd_mode == RXD_MODE_1) {
5469 		ering->rx_max_pending = MAX_RX_DESC_1;
5470 		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5471 	} else {
5472 		ering->rx_max_pending = MAX_RX_DESC_2;
5473 		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5474 	}
5475 
5476 	ering->tx_max_pending = MAX_TX_DESC;
5477 
5478 	for (i = 0; i < sp->config.rx_ring_num; i++)
5479 		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5480 	ering->rx_pending = rx_desc_count;
5481 	ering->rx_jumbo_pending = rx_desc_count;
5482 
5483 	for (i = 0; i < sp->config.tx_fifo_num; i++)
5484 		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5485 	ering->tx_pending = tx_desc_count;
5486 	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5487 }
5488 
5489 /**
5490  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5491  * @sp : private member of the device structure, which is a pointer to the
5492  *	s2io_nic structure.
5493  * @ep : pointer to the structure with pause parameters given by ethtool.
5494  * Description:
5495  * Returns the Pause frame generation and reception capability of the NIC.
5496  * Return value:
5497  *  void
5498  */
5499 static void s2io_ethtool_getpause_data(struct net_device *dev,
5500 				       struct ethtool_pauseparam *ep)
5501 {
5502 	u64 val64;
5503 	struct s2io_nic *sp = netdev_priv(dev);
5504 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5505 
5506 	val64 = readq(&bar0->rmac_pause_cfg);
5507 	if (val64 & RMAC_PAUSE_GEN_ENABLE)
5508 		ep->tx_pause = true;
5509 	if (val64 & RMAC_PAUSE_RX_ENABLE)
5510 		ep->rx_pause = true;
5511 	ep->autoneg = false;
5512 }
5513 
5514 /**
5515  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5516  * @sp : private member of the device structure, which is a pointer to the
5517  *      s2io_nic structure.
5518  * @ep : pointer to the structure with pause parameters given by ethtool.
5519  * Description:
5520  * It can be used to set or reset Pause frame generation or reception
5521  * support of the NIC.
5522  * Return value:
5523  * int, returns 0 on Success
5524  */
5525 
5526 static int s2io_ethtool_setpause_data(struct net_device *dev,
5527 				      struct ethtool_pauseparam *ep)
5528 {
5529 	u64 val64;
5530 	struct s2io_nic *sp = netdev_priv(dev);
5531 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5532 
5533 	val64 = readq(&bar0->rmac_pause_cfg);
5534 	if (ep->tx_pause)
5535 		val64 |= RMAC_PAUSE_GEN_ENABLE;
5536 	else
5537 		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5538 	if (ep->rx_pause)
5539 		val64 |= RMAC_PAUSE_RX_ENABLE;
5540 	else
5541 		val64 &= ~RMAC_PAUSE_RX_ENABLE;
5542 	writeq(val64, &bar0->rmac_pause_cfg);
5543 	return 0;
5544 }
5545 
5546 /**
5547  * read_eeprom - reads 4 bytes of data from user given offset.
5548  * @sp : private member of the device structure, which is a pointer to the
5549  *      s2io_nic structure.
5550  * @off : offset at which the data must be written
5551  * @data : Its an output parameter where the data read at the given
5552  *	offset is stored.
5553  * Description:
5554  * Will read 4 bytes of data from the user given offset and return the
5555  * read data.
5556  * NOTE: Will allow to read only part of the EEPROM visible through the
5557  *   I2C bus.
5558  * Return value:
5559  *  -1 on failure and 0 on success.
5560  */
5561 
5562 #define S2IO_DEV_ID		5
5563 static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5564 {
5565 	int ret = -1;
5566 	u32 exit_cnt = 0;
5567 	u64 val64;
5568 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5569 
5570 	if (sp->device_type == XFRAME_I_DEVICE) {
5571 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5572 			I2C_CONTROL_ADDR(off) |
5573 			I2C_CONTROL_BYTE_CNT(0x3) |
5574 			I2C_CONTROL_READ |
5575 			I2C_CONTROL_CNTL_START;
5576 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5577 
5578 		while (exit_cnt < 5) {
5579 			val64 = readq(&bar0->i2c_control);
5580 			if (I2C_CONTROL_CNTL_END(val64)) {
5581 				*data = I2C_CONTROL_GET_DATA(val64);
5582 				ret = 0;
5583 				break;
5584 			}
5585 			msleep(50);
5586 			exit_cnt++;
5587 		}
5588 	}
5589 
5590 	if (sp->device_type == XFRAME_II_DEVICE) {
5591 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5592 			SPI_CONTROL_BYTECNT(0x3) |
5593 			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5594 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5595 		val64 |= SPI_CONTROL_REQ;
5596 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5597 		while (exit_cnt < 5) {
5598 			val64 = readq(&bar0->spi_control);
5599 			if (val64 & SPI_CONTROL_NACK) {
5600 				ret = 1;
5601 				break;
5602 			} else if (val64 & SPI_CONTROL_DONE) {
5603 				*data = readq(&bar0->spi_data);
5604 				*data &= 0xffffff;
5605 				ret = 0;
5606 				break;
5607 			}
5608 			msleep(50);
5609 			exit_cnt++;
5610 		}
5611 	}
5612 	return ret;
5613 }
5614 
5615 /**
5616  *  write_eeprom - actually writes the relevant part of the data value.
5617  *  @sp : private member of the device structure, which is a pointer to the
5618  *       s2io_nic structure.
5619  *  @off : offset at which the data must be written
5620  *  @data : The data that is to be written
5621  *  @cnt : Number of bytes of the data that are actually to be written into
5622  *  the Eeprom. (max of 3)
5623  * Description:
5624  *  Actually writes the relevant part of the data value into the Eeprom
5625  *  through the I2C bus.
5626  * Return value:
5627  *  0 on success, -1 on failure.
5628  */
5629 
5630 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5631 {
5632 	int exit_cnt = 0, ret = -1;
5633 	u64 val64;
5634 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5635 
5636 	if (sp->device_type == XFRAME_I_DEVICE) {
5637 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5638 			I2C_CONTROL_ADDR(off) |
5639 			I2C_CONTROL_BYTE_CNT(cnt) |
5640 			I2C_CONTROL_SET_DATA((u32)data) |
5641 			I2C_CONTROL_CNTL_START;
5642 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5643 
5644 		while (exit_cnt < 5) {
5645 			val64 = readq(&bar0->i2c_control);
5646 			if (I2C_CONTROL_CNTL_END(val64)) {
5647 				if (!(val64 & I2C_CONTROL_NACK))
5648 					ret = 0;
5649 				break;
5650 			}
5651 			msleep(50);
5652 			exit_cnt++;
5653 		}
5654 	}
5655 
5656 	if (sp->device_type == XFRAME_II_DEVICE) {
5657 		int write_cnt = (cnt == 8) ? 0 : cnt;
5658 		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5659 
5660 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5661 			SPI_CONTROL_BYTECNT(write_cnt) |
5662 			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5663 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5664 		val64 |= SPI_CONTROL_REQ;
5665 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5666 		while (exit_cnt < 5) {
5667 			val64 = readq(&bar0->spi_control);
5668 			if (val64 & SPI_CONTROL_NACK) {
5669 				ret = 1;
5670 				break;
5671 			} else if (val64 & SPI_CONTROL_DONE) {
5672 				ret = 0;
5673 				break;
5674 			}
5675 			msleep(50);
5676 			exit_cnt++;
5677 		}
5678 	}
5679 	return ret;
5680 }
5681 static void s2io_vpd_read(struct s2io_nic *nic)
5682 {
5683 	u8 *vpd_data;
5684 	u8 data;
5685 	int i = 0, cnt, len, fail = 0;
5686 	int vpd_addr = 0x80;
5687 	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5688 
5689 	if (nic->device_type == XFRAME_II_DEVICE) {
5690 		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5691 		vpd_addr = 0x80;
5692 	} else {
5693 		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5694 		vpd_addr = 0x50;
5695 	}
5696 	strcpy(nic->serial_num, "NOT AVAILABLE");
5697 
5698 	vpd_data = kmalloc(256, GFP_KERNEL);
5699 	if (!vpd_data) {
5700 		swstats->mem_alloc_fail_cnt++;
5701 		return;
5702 	}
5703 	swstats->mem_allocated += 256;
5704 
5705 	for (i = 0; i < 256; i += 4) {
5706 		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5707 		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5708 		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5709 		for (cnt = 0; cnt < 5; cnt++) {
5710 			msleep(2);
5711 			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5712 			if (data == 0x80)
5713 				break;
5714 		}
5715 		if (cnt >= 5) {
5716 			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5717 			fail = 1;
5718 			break;
5719 		}
5720 		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5721 				      (u32 *)&vpd_data[i]);
5722 	}
5723 
5724 	if (!fail) {
5725 		/* read serial number of adapter */
5726 		for (cnt = 0; cnt < 252; cnt++) {
5727 			if ((vpd_data[cnt] == 'S') &&
5728 			    (vpd_data[cnt+1] == 'N')) {
5729 				len = vpd_data[cnt+2];
5730 				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5731 					memcpy(nic->serial_num,
5732 					       &vpd_data[cnt + 3],
5733 					       len);
5734 					memset(nic->serial_num+len,
5735 					       0,
5736 					       VPD_STRING_LEN-len);
5737 					break;
5738 				}
5739 			}
5740 		}
5741 	}
5742 
5743 	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5744 		len = vpd_data[1];
5745 		memcpy(nic->product_name, &vpd_data[3], len);
5746 		nic->product_name[len] = 0;
5747 	}
5748 	kfree(vpd_data);
5749 	swstats->mem_freed += 256;
5750 }
5751 
5752 /**
5753  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5754  *  @sp : private member of the device structure, which is a pointer to the
5755  *  s2io_nic structure.
5756  *  @eeprom : pointer to the user level structure provided by ethtool,
5757  *  containing all relevant information.
5758  *  @data_buf : user defined value to be written into Eeprom.
5759  *  Description: Reads the values stored in the Eeprom at given offset
5760  *  for a given length. Stores these values int the input argument data
5761  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5762  *  Return value:
5763  *  int  0 on success
5764  */
5765 
5766 static int s2io_ethtool_geeprom(struct net_device *dev,
5767 				struct ethtool_eeprom *eeprom, u8 * data_buf)
5768 {
5769 	u32 i, valid;
5770 	u64 data;
5771 	struct s2io_nic *sp = netdev_priv(dev);
5772 
5773 	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5774 
5775 	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5776 		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5777 
5778 	for (i = 0; i < eeprom->len; i += 4) {
5779 		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5780 			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5781 			return -EFAULT;
5782 		}
5783 		valid = INV(data);
5784 		memcpy((data_buf + i), &valid, 4);
5785 	}
5786 	return 0;
5787 }
5788 
5789 /**
5790  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5791  *  @sp : private member of the device structure, which is a pointer to the
5792  *  s2io_nic structure.
5793  *  @eeprom : pointer to the user level structure provided by ethtool,
5794  *  containing all relevant information.
5795  *  @data_buf ; user defined value to be written into Eeprom.
5796  *  Description:
5797  *  Tries to write the user provided value in the Eeprom, at the offset
5798  *  given by the user.
5799  *  Return value:
5800  *  0 on success, -EFAULT on failure.
5801  */
5802 
5803 static int s2io_ethtool_seeprom(struct net_device *dev,
5804 				struct ethtool_eeprom *eeprom,
5805 				u8 *data_buf)
5806 {
5807 	int len = eeprom->len, cnt = 0;
5808 	u64 valid = 0, data;
5809 	struct s2io_nic *sp = netdev_priv(dev);
5810 
5811 	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5812 		DBG_PRINT(ERR_DBG,
5813 			  "ETHTOOL_WRITE_EEPROM Err: "
5814 			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
5815 			  (sp->pdev->vendor | (sp->pdev->device << 16)),
5816 			  eeprom->magic);
5817 		return -EFAULT;
5818 	}
5819 
5820 	while (len) {
5821 		data = (u32)data_buf[cnt] & 0x000000FF;
5822 		if (data)
5823 			valid = (u32)(data << 24);
5824 		else
5825 			valid = data;
5826 
5827 		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5828 			DBG_PRINT(ERR_DBG,
5829 				  "ETHTOOL_WRITE_EEPROM Err: "
5830 				  "Cannot write into the specified offset\n");
5831 			return -EFAULT;
5832 		}
5833 		cnt++;
5834 		len--;
5835 	}
5836 
5837 	return 0;
5838 }
5839 
5840 /**
5841  * s2io_register_test - reads and writes into all clock domains.
5842  * @sp : private member of the device structure, which is a pointer to the
5843  * s2io_nic structure.
5844  * @data : variable that returns the result of each of the test conducted b
5845  * by the driver.
5846  * Description:
5847  * Read and write into all clock domains. The NIC has 3 clock domains,
5848  * see that registers in all the three regions are accessible.
5849  * Return value:
5850  * 0 on success.
5851  */
5852 
5853 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5854 {
5855 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5856 	u64 val64 = 0, exp_val;
5857 	int fail = 0;
5858 
5859 	val64 = readq(&bar0->pif_rd_swapper_fb);
5860 	if (val64 != 0x123456789abcdefULL) {
5861 		fail = 1;
5862 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5863 	}
5864 
5865 	val64 = readq(&bar0->rmac_pause_cfg);
5866 	if (val64 != 0xc000ffff00000000ULL) {
5867 		fail = 1;
5868 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5869 	}
5870 
5871 	val64 = readq(&bar0->rx_queue_cfg);
5872 	if (sp->device_type == XFRAME_II_DEVICE)
5873 		exp_val = 0x0404040404040404ULL;
5874 	else
5875 		exp_val = 0x0808080808080808ULL;
5876 	if (val64 != exp_val) {
5877 		fail = 1;
5878 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5879 	}
5880 
5881 	val64 = readq(&bar0->xgxs_efifo_cfg);
5882 	if (val64 != 0x000000001923141EULL) {
5883 		fail = 1;
5884 		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5885 	}
5886 
5887 	val64 = 0x5A5A5A5A5A5A5A5AULL;
5888 	writeq(val64, &bar0->xmsi_data);
5889 	val64 = readq(&bar0->xmsi_data);
5890 	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5891 		fail = 1;
5892 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5893 	}
5894 
5895 	val64 = 0xA5A5A5A5A5A5A5A5ULL;
5896 	writeq(val64, &bar0->xmsi_data);
5897 	val64 = readq(&bar0->xmsi_data);
5898 	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5899 		fail = 1;
5900 		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5901 	}
5902 
5903 	*data = fail;
5904 	return fail;
5905 }
5906 
5907 /**
5908  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5909  * @sp : private member of the device structure, which is a pointer to the
5910  * s2io_nic structure.
5911  * @data:variable that returns the result of each of the test conducted by
5912  * the driver.
5913  * Description:
5914  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5915  * register.
5916  * Return value:
5917  * 0 on success.
5918  */
5919 
5920 static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5921 {
5922 	int fail = 0;
5923 	u64 ret_data, org_4F0, org_7F0;
5924 	u8 saved_4F0 = 0, saved_7F0 = 0;
5925 	struct net_device *dev = sp->dev;
5926 
5927 	/* Test Write Error at offset 0 */
5928 	/* Note that SPI interface allows write access to all areas
5929 	 * of EEPROM. Hence doing all negative testing only for Xframe I.
5930 	 */
5931 	if (sp->device_type == XFRAME_I_DEVICE)
5932 		if (!write_eeprom(sp, 0, 0, 3))
5933 			fail = 1;
5934 
5935 	/* Save current values at offsets 0x4F0 and 0x7F0 */
5936 	if (!read_eeprom(sp, 0x4F0, &org_4F0))
5937 		saved_4F0 = 1;
5938 	if (!read_eeprom(sp, 0x7F0, &org_7F0))
5939 		saved_7F0 = 1;
5940 
5941 	/* Test Write at offset 4f0 */
5942 	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5943 		fail = 1;
5944 	if (read_eeprom(sp, 0x4F0, &ret_data))
5945 		fail = 1;
5946 
5947 	if (ret_data != 0x012345) {
5948 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5949 			  "Data written %llx Data read %llx\n",
5950 			  dev->name, (unsigned long long)0x12345,
5951 			  (unsigned long long)ret_data);
5952 		fail = 1;
5953 	}
5954 
5955 	/* Reset the EEPROM data go FFFF */
5956 	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5957 
5958 	/* Test Write Request Error at offset 0x7c */
5959 	if (sp->device_type == XFRAME_I_DEVICE)
5960 		if (!write_eeprom(sp, 0x07C, 0, 3))
5961 			fail = 1;
5962 
5963 	/* Test Write Request at offset 0x7f0 */
5964 	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5965 		fail = 1;
5966 	if (read_eeprom(sp, 0x7F0, &ret_data))
5967 		fail = 1;
5968 
5969 	if (ret_data != 0x012345) {
5970 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5971 			  "Data written %llx Data read %llx\n",
5972 			  dev->name, (unsigned long long)0x12345,
5973 			  (unsigned long long)ret_data);
5974 		fail = 1;
5975 	}
5976 
5977 	/* Reset the EEPROM data go FFFF */
5978 	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5979 
5980 	if (sp->device_type == XFRAME_I_DEVICE) {
5981 		/* Test Write Error at offset 0x80 */
5982 		if (!write_eeprom(sp, 0x080, 0, 3))
5983 			fail = 1;
5984 
5985 		/* Test Write Error at offset 0xfc */
5986 		if (!write_eeprom(sp, 0x0FC, 0, 3))
5987 			fail = 1;
5988 
5989 		/* Test Write Error at offset 0x100 */
5990 		if (!write_eeprom(sp, 0x100, 0, 3))
5991 			fail = 1;
5992 
5993 		/* Test Write Error at offset 4ec */
5994 		if (!write_eeprom(sp, 0x4EC, 0, 3))
5995 			fail = 1;
5996 	}
5997 
5998 	/* Restore values at offsets 0x4F0 and 0x7F0 */
5999 	if (saved_4F0)
6000 		write_eeprom(sp, 0x4F0, org_4F0, 3);
6001 	if (saved_7F0)
6002 		write_eeprom(sp, 0x7F0, org_7F0, 3);
6003 
6004 	*data = fail;
6005 	return fail;
6006 }
6007 
6008 /**
6009  * s2io_bist_test - invokes the MemBist test of the card .
6010  * @sp : private member of the device structure, which is a pointer to the
6011  * s2io_nic structure.
6012  * @data:variable that returns the result of each of the test conducted by
6013  * the driver.
6014  * Description:
6015  * This invokes the MemBist test of the card. We give around
6016  * 2 secs time for the Test to complete. If it's still not complete
6017  * within this peiod, we consider that the test failed.
6018  * Return value:
6019  * 0 on success and -1 on failure.
6020  */
6021 
6022 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6023 {
6024 	u8 bist = 0;
6025 	int cnt = 0, ret = -1;
6026 
6027 	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6028 	bist |= PCI_BIST_START;
6029 	pci_write_config_word(sp->pdev, PCI_BIST, bist);
6030 
6031 	while (cnt < 20) {
6032 		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6033 		if (!(bist & PCI_BIST_START)) {
6034 			*data = (bist & PCI_BIST_CODE_MASK);
6035 			ret = 0;
6036 			break;
6037 		}
6038 		msleep(100);
6039 		cnt++;
6040 	}
6041 
6042 	return ret;
6043 }
6044 
6045 /**
6046  * s2io_link_test - verifies the link state of the nic
6047  * @sp ; private member of the device structure, which is a pointer to the
6048  * s2io_nic structure.
6049  * @data: variable that returns the result of each of the test conducted by
6050  * the driver.
6051  * Description:
6052  * The function verifies the link state of the NIC and updates the input
6053  * argument 'data' appropriately.
6054  * Return value:
6055  * 0 on success.
6056  */
6057 
6058 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6059 {
6060 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6061 	u64 val64;
6062 
6063 	val64 = readq(&bar0->adapter_status);
6064 	if (!(LINK_IS_UP(val64)))
6065 		*data = 1;
6066 	else
6067 		*data = 0;
6068 
6069 	return *data;
6070 }
6071 
6072 /**
6073  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6074  * @sp: private member of the device structure, which is a pointer to the
6075  * s2io_nic structure.
6076  * @data: variable that returns the result of each of the test
6077  * conducted by the driver.
6078  * Description:
6079  *  This is one of the offline test that tests the read and write
6080  *  access to the RldRam chip on the NIC.
6081  * Return value:
6082  *  0 on success.
6083  */
6084 
6085 static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6086 {
6087 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6088 	u64 val64;
6089 	int cnt, iteration = 0, test_fail = 0;
6090 
6091 	val64 = readq(&bar0->adapter_control);
6092 	val64 &= ~ADAPTER_ECC_EN;
6093 	writeq(val64, &bar0->adapter_control);
6094 
6095 	val64 = readq(&bar0->mc_rldram_test_ctrl);
6096 	val64 |= MC_RLDRAM_TEST_MODE;
6097 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6098 
6099 	val64 = readq(&bar0->mc_rldram_mrs);
6100 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6101 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6102 
6103 	val64 |= MC_RLDRAM_MRS_ENABLE;
6104 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6105 
6106 	while (iteration < 2) {
6107 		val64 = 0x55555555aaaa0000ULL;
6108 		if (iteration == 1)
6109 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6110 		writeq(val64, &bar0->mc_rldram_test_d0);
6111 
6112 		val64 = 0xaaaa5a5555550000ULL;
6113 		if (iteration == 1)
6114 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6115 		writeq(val64, &bar0->mc_rldram_test_d1);
6116 
6117 		val64 = 0x55aaaaaaaa5a0000ULL;
6118 		if (iteration == 1)
6119 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6120 		writeq(val64, &bar0->mc_rldram_test_d2);
6121 
6122 		val64 = (u64) (0x0000003ffffe0100ULL);
6123 		writeq(val64, &bar0->mc_rldram_test_add);
6124 
6125 		val64 = MC_RLDRAM_TEST_MODE |
6126 			MC_RLDRAM_TEST_WRITE |
6127 			MC_RLDRAM_TEST_GO;
6128 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6129 
6130 		for (cnt = 0; cnt < 5; cnt++) {
6131 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6132 			if (val64 & MC_RLDRAM_TEST_DONE)
6133 				break;
6134 			msleep(200);
6135 		}
6136 
6137 		if (cnt == 5)
6138 			break;
6139 
6140 		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6141 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6142 
6143 		for (cnt = 0; cnt < 5; cnt++) {
6144 			val64 = readq(&bar0->mc_rldram_test_ctrl);
6145 			if (val64 & MC_RLDRAM_TEST_DONE)
6146 				break;
6147 			msleep(500);
6148 		}
6149 
6150 		if (cnt == 5)
6151 			break;
6152 
6153 		val64 = readq(&bar0->mc_rldram_test_ctrl);
6154 		if (!(val64 & MC_RLDRAM_TEST_PASS))
6155 			test_fail = 1;
6156 
6157 		iteration++;
6158 	}
6159 
6160 	*data = test_fail;
6161 
6162 	/* Bring the adapter out of test mode */
6163 	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6164 
6165 	return test_fail;
6166 }
6167 
6168 /**
6169  *  s2io_ethtool_test - conducts 6 tsets to determine the health of card.
6170  *  @sp : private member of the device structure, which is a pointer to the
6171  *  s2io_nic structure.
6172  *  @ethtest : pointer to a ethtool command specific structure that will be
6173  *  returned to the user.
6174  *  @data : variable that returns the result of each of the test
6175  * conducted by the driver.
6176  * Description:
6177  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6178  *  the health of the card.
6179  * Return value:
6180  *  void
6181  */
6182 
6183 static void s2io_ethtool_test(struct net_device *dev,
6184 			      struct ethtool_test *ethtest,
6185 			      uint64_t *data)
6186 {
6187 	struct s2io_nic *sp = netdev_priv(dev);
6188 	int orig_state = netif_running(sp->dev);
6189 
6190 	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6191 		/* Offline Tests. */
6192 		if (orig_state)
6193 			s2io_close(sp->dev);
6194 
6195 		if (s2io_register_test(sp, &data[0]))
6196 			ethtest->flags |= ETH_TEST_FL_FAILED;
6197 
6198 		s2io_reset(sp);
6199 
6200 		if (s2io_rldram_test(sp, &data[3]))
6201 			ethtest->flags |= ETH_TEST_FL_FAILED;
6202 
6203 		s2io_reset(sp);
6204 
6205 		if (s2io_eeprom_test(sp, &data[1]))
6206 			ethtest->flags |= ETH_TEST_FL_FAILED;
6207 
6208 		if (s2io_bist_test(sp, &data[4]))
6209 			ethtest->flags |= ETH_TEST_FL_FAILED;
6210 
6211 		if (orig_state)
6212 			s2io_open(sp->dev);
6213 
6214 		data[2] = 0;
6215 	} else {
6216 		/* Online Tests. */
6217 		if (!orig_state) {
6218 			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6219 				  dev->name);
6220 			data[0] = -1;
6221 			data[1] = -1;
6222 			data[2] = -1;
6223 			data[3] = -1;
6224 			data[4] = -1;
6225 		}
6226 
6227 		if (s2io_link_test(sp, &data[2]))
6228 			ethtest->flags |= ETH_TEST_FL_FAILED;
6229 
6230 		data[0] = 0;
6231 		data[1] = 0;
6232 		data[3] = 0;
6233 		data[4] = 0;
6234 	}
6235 }
6236 
6237 static void s2io_get_ethtool_stats(struct net_device *dev,
6238 				   struct ethtool_stats *estats,
6239 				   u64 *tmp_stats)
6240 {
6241 	int i = 0, k;
6242 	struct s2io_nic *sp = netdev_priv(dev);
6243 	struct stat_block *stats = sp->mac_control.stats_info;
6244 	struct swStat *swstats = &stats->sw_stat;
6245 	struct xpakStat *xstats = &stats->xpak_stat;
6246 
6247 	s2io_updt_stats(sp);
6248 	tmp_stats[i++] =
6249 		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
6250 		le32_to_cpu(stats->tmac_frms);
6251 	tmp_stats[i++] =
6252 		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6253 		le32_to_cpu(stats->tmac_data_octets);
6254 	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6255 	tmp_stats[i++] =
6256 		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6257 		le32_to_cpu(stats->tmac_mcst_frms);
6258 	tmp_stats[i++] =
6259 		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6260 		le32_to_cpu(stats->tmac_bcst_frms);
6261 	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6262 	tmp_stats[i++] =
6263 		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6264 		le32_to_cpu(stats->tmac_ttl_octets);
6265 	tmp_stats[i++] =
6266 		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6267 		le32_to_cpu(stats->tmac_ucst_frms);
6268 	tmp_stats[i++] =
6269 		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6270 		le32_to_cpu(stats->tmac_nucst_frms);
6271 	tmp_stats[i++] =
6272 		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6273 		le32_to_cpu(stats->tmac_any_err_frms);
6274 	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6275 	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6276 	tmp_stats[i++] =
6277 		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6278 		le32_to_cpu(stats->tmac_vld_ip);
6279 	tmp_stats[i++] =
6280 		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6281 		le32_to_cpu(stats->tmac_drop_ip);
6282 	tmp_stats[i++] =
6283 		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6284 		le32_to_cpu(stats->tmac_icmp);
6285 	tmp_stats[i++] =
6286 		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6287 		le32_to_cpu(stats->tmac_rst_tcp);
6288 	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6289 	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6290 		le32_to_cpu(stats->tmac_udp);
6291 	tmp_stats[i++] =
6292 		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6293 		le32_to_cpu(stats->rmac_vld_frms);
6294 	tmp_stats[i++] =
6295 		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6296 		le32_to_cpu(stats->rmac_data_octets);
6297 	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6298 	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6299 	tmp_stats[i++] =
6300 		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6301 		le32_to_cpu(stats->rmac_vld_mcst_frms);
6302 	tmp_stats[i++] =
6303 		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6304 		le32_to_cpu(stats->rmac_vld_bcst_frms);
6305 	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6306 	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6307 	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6308 	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6309 	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6310 	tmp_stats[i++] =
6311 		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6312 		le32_to_cpu(stats->rmac_ttl_octets);
6313 	tmp_stats[i++] =
6314 		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6315 		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
6316 	tmp_stats[i++] =
6317 		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6318 		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6319 	tmp_stats[i++] =
6320 		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6321 		le32_to_cpu(stats->rmac_discarded_frms);
6322 	tmp_stats[i++] =
6323 		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6324 		<< 32 | le32_to_cpu(stats->rmac_drop_events);
6325 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6326 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6327 	tmp_stats[i++] =
6328 		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6329 		le32_to_cpu(stats->rmac_usized_frms);
6330 	tmp_stats[i++] =
6331 		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6332 		le32_to_cpu(stats->rmac_osized_frms);
6333 	tmp_stats[i++] =
6334 		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6335 		le32_to_cpu(stats->rmac_frag_frms);
6336 	tmp_stats[i++] =
6337 		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6338 		le32_to_cpu(stats->rmac_jabber_frms);
6339 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6340 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6341 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6342 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6343 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6344 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6345 	tmp_stats[i++] =
6346 		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6347 		le32_to_cpu(stats->rmac_ip);
6348 	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6349 	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6350 	tmp_stats[i++] =
6351 		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6352 		le32_to_cpu(stats->rmac_drop_ip);
6353 	tmp_stats[i++] =
6354 		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6355 		le32_to_cpu(stats->rmac_icmp);
6356 	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6357 	tmp_stats[i++] =
6358 		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6359 		le32_to_cpu(stats->rmac_udp);
6360 	tmp_stats[i++] =
6361 		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6362 		le32_to_cpu(stats->rmac_err_drp_udp);
6363 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6364 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6365 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6366 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6367 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6368 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6369 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6370 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6371 	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6372 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6373 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6374 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6375 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6376 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6377 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6378 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6379 	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6380 	tmp_stats[i++] =
6381 		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6382 		le32_to_cpu(stats->rmac_pause_cnt);
6383 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6384 	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6385 	tmp_stats[i++] =
6386 		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6387 		le32_to_cpu(stats->rmac_accepted_ip);
6388 	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6389 	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6390 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6391 	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6392 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6393 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6394 	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6395 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6396 	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6397 	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6398 	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6399 	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6400 	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6401 	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6402 	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6403 	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6404 	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6405 	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6406 	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6407 
6408 	/* Enhanced statistics exist only for Hercules */
6409 	if (sp->device_type == XFRAME_II_DEVICE) {
6410 		tmp_stats[i++] =
6411 			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6412 		tmp_stats[i++] =
6413 			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6414 		tmp_stats[i++] =
6415 			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6416 		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6417 		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6418 		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6419 		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6420 		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6421 		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6422 		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6423 		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6424 		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6425 		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6426 		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6427 		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6428 		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6429 	}
6430 
6431 	tmp_stats[i++] = 0;
6432 	tmp_stats[i++] = swstats->single_ecc_errs;
6433 	tmp_stats[i++] = swstats->double_ecc_errs;
6434 	tmp_stats[i++] = swstats->parity_err_cnt;
6435 	tmp_stats[i++] = swstats->serious_err_cnt;
6436 	tmp_stats[i++] = swstats->soft_reset_cnt;
6437 	tmp_stats[i++] = swstats->fifo_full_cnt;
6438 	for (k = 0; k < MAX_RX_RINGS; k++)
6439 		tmp_stats[i++] = swstats->ring_full_cnt[k];
6440 	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6441 	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6442 	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6443 	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6444 	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6445 	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6446 	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6447 	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6448 	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6449 	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6450 	tmp_stats[i++] = xstats->warn_laser_output_power_high;
6451 	tmp_stats[i++] = xstats->warn_laser_output_power_low;
6452 	tmp_stats[i++] = swstats->clubbed_frms_cnt;
6453 	tmp_stats[i++] = swstats->sending_both;
6454 	tmp_stats[i++] = swstats->outof_sequence_pkts;
6455 	tmp_stats[i++] = swstats->flush_max_pkts;
6456 	if (swstats->num_aggregations) {
6457 		u64 tmp = swstats->sum_avg_pkts_aggregated;
6458 		int count = 0;
6459 		/*
6460 		 * Since 64-bit divide does not work on all platforms,
6461 		 * do repeated subtraction.
6462 		 */
6463 		while (tmp >= swstats->num_aggregations) {
6464 			tmp -= swstats->num_aggregations;
6465 			count++;
6466 		}
6467 		tmp_stats[i++] = count;
6468 	} else
6469 		tmp_stats[i++] = 0;
6470 	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6471 	tmp_stats[i++] = swstats->pci_map_fail_cnt;
6472 	tmp_stats[i++] = swstats->watchdog_timer_cnt;
6473 	tmp_stats[i++] = swstats->mem_allocated;
6474 	tmp_stats[i++] = swstats->mem_freed;
6475 	tmp_stats[i++] = swstats->link_up_cnt;
6476 	tmp_stats[i++] = swstats->link_down_cnt;
6477 	tmp_stats[i++] = swstats->link_up_time;
6478 	tmp_stats[i++] = swstats->link_down_time;
6479 
6480 	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6481 	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6482 	tmp_stats[i++] = swstats->tx_parity_err_cnt;
6483 	tmp_stats[i++] = swstats->tx_link_loss_cnt;
6484 	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6485 
6486 	tmp_stats[i++] = swstats->rx_parity_err_cnt;
6487 	tmp_stats[i++] = swstats->rx_abort_cnt;
6488 	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6489 	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6490 	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6491 	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6492 	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6493 	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6494 	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6495 	tmp_stats[i++] = swstats->tda_err_cnt;
6496 	tmp_stats[i++] = swstats->pfc_err_cnt;
6497 	tmp_stats[i++] = swstats->pcc_err_cnt;
6498 	tmp_stats[i++] = swstats->tti_err_cnt;
6499 	tmp_stats[i++] = swstats->tpa_err_cnt;
6500 	tmp_stats[i++] = swstats->sm_err_cnt;
6501 	tmp_stats[i++] = swstats->lso_err_cnt;
6502 	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6503 	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6504 	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6505 	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6506 	tmp_stats[i++] = swstats->rc_err_cnt;
6507 	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6508 	tmp_stats[i++] = swstats->rpa_err_cnt;
6509 	tmp_stats[i++] = swstats->rda_err_cnt;
6510 	tmp_stats[i++] = swstats->rti_err_cnt;
6511 	tmp_stats[i++] = swstats->mc_err_cnt;
6512 }
6513 
6514 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6515 {
6516 	return XENA_REG_SPACE;
6517 }
6518 
6519 
6520 static int s2io_get_eeprom_len(struct net_device *dev)
6521 {
6522 	return XENA_EEPROM_SPACE;
6523 }
6524 
6525 static int s2io_get_sset_count(struct net_device *dev, int sset)
6526 {
6527 	struct s2io_nic *sp = netdev_priv(dev);
6528 
6529 	switch (sset) {
6530 	case ETH_SS_TEST:
6531 		return S2IO_TEST_LEN;
6532 	case ETH_SS_STATS:
6533 		switch (sp->device_type) {
6534 		case XFRAME_I_DEVICE:
6535 			return XFRAME_I_STAT_LEN;
6536 		case XFRAME_II_DEVICE:
6537 			return XFRAME_II_STAT_LEN;
6538 		default:
6539 			return 0;
6540 		}
6541 	default:
6542 		return -EOPNOTSUPP;
6543 	}
6544 }
6545 
6546 static void s2io_ethtool_get_strings(struct net_device *dev,
6547 				     u32 stringset, u8 *data)
6548 {
6549 	int stat_size = 0;
6550 	struct s2io_nic *sp = netdev_priv(dev);
6551 
6552 	switch (stringset) {
6553 	case ETH_SS_TEST:
6554 		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6555 		break;
6556 	case ETH_SS_STATS:
6557 		stat_size = sizeof(ethtool_xena_stats_keys);
6558 		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6559 		if (sp->device_type == XFRAME_II_DEVICE) {
6560 			memcpy(data + stat_size,
6561 			       &ethtool_enhanced_stats_keys,
6562 			       sizeof(ethtool_enhanced_stats_keys));
6563 			stat_size += sizeof(ethtool_enhanced_stats_keys);
6564 		}
6565 
6566 		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6567 		       sizeof(ethtool_driver_stats_keys));
6568 	}
6569 }
6570 
6571 static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6572 {
6573 	struct s2io_nic *sp = netdev_priv(dev);
6574 	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6575 
6576 	if (changed && netif_running(dev)) {
6577 		int rc;
6578 
6579 		s2io_stop_all_tx_queue(sp);
6580 		s2io_card_down(sp);
6581 		dev->features = features;
6582 		rc = s2io_card_up(sp);
6583 		if (rc)
6584 			s2io_reset(sp);
6585 		else
6586 			s2io_start_all_tx_queue(sp);
6587 
6588 		return rc ? rc : 1;
6589 	}
6590 
6591 	return 0;
6592 }
6593 
6594 static const struct ethtool_ops netdev_ethtool_ops = {
6595 	.get_drvinfo = s2io_ethtool_gdrvinfo,
6596 	.get_regs_len = s2io_ethtool_get_regs_len,
6597 	.get_regs = s2io_ethtool_gregs,
6598 	.get_link = ethtool_op_get_link,
6599 	.get_eeprom_len = s2io_get_eeprom_len,
6600 	.get_eeprom = s2io_ethtool_geeprom,
6601 	.set_eeprom = s2io_ethtool_seeprom,
6602 	.get_ringparam = s2io_ethtool_gringparam,
6603 	.get_pauseparam = s2io_ethtool_getpause_data,
6604 	.set_pauseparam = s2io_ethtool_setpause_data,
6605 	.self_test = s2io_ethtool_test,
6606 	.get_strings = s2io_ethtool_get_strings,
6607 	.set_phys_id = s2io_ethtool_set_led,
6608 	.get_ethtool_stats = s2io_get_ethtool_stats,
6609 	.get_sset_count = s2io_get_sset_count,
6610 	.get_link_ksettings = s2io_ethtool_get_link_ksettings,
6611 	.set_link_ksettings = s2io_ethtool_set_link_ksettings,
6612 };
6613 
6614 /**
6615  *  s2io_ioctl - Entry point for the Ioctl
6616  *  @dev :  Device pointer.
6617  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6618  *  a proprietary structure used to pass information to the driver.
6619  *  @cmd :  This is used to distinguish between the different commands that
6620  *  can be passed to the IOCTL functions.
6621  *  Description:
6622  *  Currently there are no special functionality supported in IOCTL, hence
6623  *  function always return EOPNOTSUPPORTED
6624  */
6625 
6626 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6627 {
6628 	return -EOPNOTSUPP;
6629 }
6630 
6631 /**
6632  *  s2io_change_mtu - entry point to change MTU size for the device.
6633  *   @dev : device pointer.
6634  *   @new_mtu : the new MTU size for the device.
6635  *   Description: A driver entry point to change MTU size for the device.
6636  *   Before changing the MTU the device must be stopped.
6637  *  Return value:
6638  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6639  *   file on failure.
6640  */
6641 
6642 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6643 {
6644 	struct s2io_nic *sp = netdev_priv(dev);
6645 	int ret = 0;
6646 
6647 	dev->mtu = new_mtu;
6648 	if (netif_running(dev)) {
6649 		s2io_stop_all_tx_queue(sp);
6650 		s2io_card_down(sp);
6651 		ret = s2io_card_up(sp);
6652 		if (ret) {
6653 			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6654 				  __func__);
6655 			return ret;
6656 		}
6657 		s2io_wake_all_tx_queue(sp);
6658 	} else { /* Device is down */
6659 		struct XENA_dev_config __iomem *bar0 = sp->bar0;
6660 		u64 val64 = new_mtu;
6661 
6662 		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6663 	}
6664 
6665 	return ret;
6666 }
6667 
6668 /**
6669  * s2io_set_link - Set the LInk status
6670  * @data: long pointer to device private structue
6671  * Description: Sets the link status for the adapter
6672  */
6673 
6674 static void s2io_set_link(struct work_struct *work)
6675 {
6676 	struct s2io_nic *nic = container_of(work, struct s2io_nic,
6677 					    set_link_task);
6678 	struct net_device *dev = nic->dev;
6679 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
6680 	register u64 val64;
6681 	u16 subid;
6682 
6683 	rtnl_lock();
6684 
6685 	if (!netif_running(dev))
6686 		goto out_unlock;
6687 
6688 	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6689 		/* The card is being reset, no point doing anything */
6690 		goto out_unlock;
6691 	}
6692 
6693 	subid = nic->pdev->subsystem_device;
6694 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6695 		/*
6696 		 * Allow a small delay for the NICs self initiated
6697 		 * cleanup to complete.
6698 		 */
6699 		msleep(100);
6700 	}
6701 
6702 	val64 = readq(&bar0->adapter_status);
6703 	if (LINK_IS_UP(val64)) {
6704 		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6705 			if (verify_xena_quiescence(nic)) {
6706 				val64 = readq(&bar0->adapter_control);
6707 				val64 |= ADAPTER_CNTL_EN;
6708 				writeq(val64, &bar0->adapter_control);
6709 				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6710 					    nic->device_type, subid)) {
6711 					val64 = readq(&bar0->gpio_control);
6712 					val64 |= GPIO_CTRL_GPIO_0;
6713 					writeq(val64, &bar0->gpio_control);
6714 					val64 = readq(&bar0->gpio_control);
6715 				} else {
6716 					val64 |= ADAPTER_LED_ON;
6717 					writeq(val64, &bar0->adapter_control);
6718 				}
6719 				nic->device_enabled_once = true;
6720 			} else {
6721 				DBG_PRINT(ERR_DBG,
6722 					  "%s: Error: device is not Quiescent\n",
6723 					  dev->name);
6724 				s2io_stop_all_tx_queue(nic);
6725 			}
6726 		}
6727 		val64 = readq(&bar0->adapter_control);
6728 		val64 |= ADAPTER_LED_ON;
6729 		writeq(val64, &bar0->adapter_control);
6730 		s2io_link(nic, LINK_UP);
6731 	} else {
6732 		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6733 						      subid)) {
6734 			val64 = readq(&bar0->gpio_control);
6735 			val64 &= ~GPIO_CTRL_GPIO_0;
6736 			writeq(val64, &bar0->gpio_control);
6737 			val64 = readq(&bar0->gpio_control);
6738 		}
6739 		/* turn off LED */
6740 		val64 = readq(&bar0->adapter_control);
6741 		val64 = val64 & (~ADAPTER_LED_ON);
6742 		writeq(val64, &bar0->adapter_control);
6743 		s2io_link(nic, LINK_DOWN);
6744 	}
6745 	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6746 
6747 out_unlock:
6748 	rtnl_unlock();
6749 }
6750 
6751 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6752 				  struct buffAdd *ba,
6753 				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
6754 				  u64 *temp2, int size)
6755 {
6756 	struct net_device *dev = sp->dev;
6757 	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6758 
6759 	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6760 		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6761 		/* allocate skb */
6762 		if (*skb) {
6763 			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6764 			/*
6765 			 * As Rx frame are not going to be processed,
6766 			 * using same mapped address for the Rxd
6767 			 * buffer pointer
6768 			 */
6769 			rxdp1->Buffer0_ptr = *temp0;
6770 		} else {
6771 			*skb = netdev_alloc_skb(dev, size);
6772 			if (!(*skb)) {
6773 				DBG_PRINT(INFO_DBG,
6774 					  "%s: Out of memory to allocate %s\n",
6775 					  dev->name, "1 buf mode SKBs");
6776 				stats->mem_alloc_fail_cnt++;
6777 				return -ENOMEM ;
6778 			}
6779 			stats->mem_allocated += (*skb)->truesize;
6780 			/* storing the mapped addr in a temp variable
6781 			 * such it will be used for next rxd whose
6782 			 * Host Control is NULL
6783 			 */
6784 			rxdp1->Buffer0_ptr = *temp0 =
6785 				pci_map_single(sp->pdev, (*skb)->data,
6786 					       size - NET_IP_ALIGN,
6787 					       PCI_DMA_FROMDEVICE);
6788 			if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6789 				goto memalloc_failed;
6790 			rxdp->Host_Control = (unsigned long) (*skb);
6791 		}
6792 	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6793 		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6794 		/* Two buffer Mode */
6795 		if (*skb) {
6796 			rxdp3->Buffer2_ptr = *temp2;
6797 			rxdp3->Buffer0_ptr = *temp0;
6798 			rxdp3->Buffer1_ptr = *temp1;
6799 		} else {
6800 			*skb = netdev_alloc_skb(dev, size);
6801 			if (!(*skb)) {
6802 				DBG_PRINT(INFO_DBG,
6803 					  "%s: Out of memory to allocate %s\n",
6804 					  dev->name,
6805 					  "2 buf mode SKBs");
6806 				stats->mem_alloc_fail_cnt++;
6807 				return -ENOMEM;
6808 			}
6809 			stats->mem_allocated += (*skb)->truesize;
6810 			rxdp3->Buffer2_ptr = *temp2 =
6811 				pci_map_single(sp->pdev, (*skb)->data,
6812 					       dev->mtu + 4,
6813 					       PCI_DMA_FROMDEVICE);
6814 			if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6815 				goto memalloc_failed;
6816 			rxdp3->Buffer0_ptr = *temp0 =
6817 				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6818 					       PCI_DMA_FROMDEVICE);
6819 			if (pci_dma_mapping_error(sp->pdev,
6820 						  rxdp3->Buffer0_ptr)) {
6821 				pci_unmap_single(sp->pdev,
6822 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6823 						 dev->mtu + 4,
6824 						 PCI_DMA_FROMDEVICE);
6825 				goto memalloc_failed;
6826 			}
6827 			rxdp->Host_Control = (unsigned long) (*skb);
6828 
6829 			/* Buffer-1 will be dummy buffer not used */
6830 			rxdp3->Buffer1_ptr = *temp1 =
6831 				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6832 					       PCI_DMA_FROMDEVICE);
6833 			if (pci_dma_mapping_error(sp->pdev,
6834 						  rxdp3->Buffer1_ptr)) {
6835 				pci_unmap_single(sp->pdev,
6836 						 (dma_addr_t)rxdp3->Buffer0_ptr,
6837 						 BUF0_LEN, PCI_DMA_FROMDEVICE);
6838 				pci_unmap_single(sp->pdev,
6839 						 (dma_addr_t)rxdp3->Buffer2_ptr,
6840 						 dev->mtu + 4,
6841 						 PCI_DMA_FROMDEVICE);
6842 				goto memalloc_failed;
6843 			}
6844 		}
6845 	}
6846 	return 0;
6847 
6848 memalloc_failed:
6849 	stats->pci_map_fail_cnt++;
6850 	stats->mem_freed += (*skb)->truesize;
6851 	dev_kfree_skb(*skb);
6852 	return -ENOMEM;
6853 }
6854 
6855 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6856 				int size)
6857 {
6858 	struct net_device *dev = sp->dev;
6859 	if (sp->rxd_mode == RXD_MODE_1) {
6860 		rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6861 	} else if (sp->rxd_mode == RXD_MODE_3B) {
6862 		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6863 		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6864 		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6865 	}
6866 }
6867 
6868 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6869 {
6870 	int i, j, k, blk_cnt = 0, size;
6871 	struct config_param *config = &sp->config;
6872 	struct mac_info *mac_control = &sp->mac_control;
6873 	struct net_device *dev = sp->dev;
6874 	struct RxD_t *rxdp = NULL;
6875 	struct sk_buff *skb = NULL;
6876 	struct buffAdd *ba = NULL;
6877 	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6878 
6879 	/* Calculate the size based on ring mode */
6880 	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6881 		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6882 	if (sp->rxd_mode == RXD_MODE_1)
6883 		size += NET_IP_ALIGN;
6884 	else if (sp->rxd_mode == RXD_MODE_3B)
6885 		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6886 
6887 	for (i = 0; i < config->rx_ring_num; i++) {
6888 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6889 		struct ring_info *ring = &mac_control->rings[i];
6890 
6891 		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6892 
6893 		for (j = 0; j < blk_cnt; j++) {
6894 			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6895 				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6896 				if (sp->rxd_mode == RXD_MODE_3B)
6897 					ba = &ring->ba[j][k];
6898 				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6899 							   &temp0_64,
6900 							   &temp1_64,
6901 							   &temp2_64,
6902 							   size) == -ENOMEM) {
6903 					return 0;
6904 				}
6905 
6906 				set_rxd_buffer_size(sp, rxdp, size);
6907 				dma_wmb();
6908 				/* flip the Ownership bit to Hardware */
6909 				rxdp->Control_1 |= RXD_OWN_XENA;
6910 			}
6911 		}
6912 	}
6913 	return 0;
6914 
6915 }
6916 
6917 static int s2io_add_isr(struct s2io_nic *sp)
6918 {
6919 	int ret = 0;
6920 	struct net_device *dev = sp->dev;
6921 	int err = 0;
6922 
6923 	if (sp->config.intr_type == MSI_X)
6924 		ret = s2io_enable_msi_x(sp);
6925 	if (ret) {
6926 		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6927 		sp->config.intr_type = INTA;
6928 	}
6929 
6930 	/*
6931 	 * Store the values of the MSIX table in
6932 	 * the struct s2io_nic structure
6933 	 */
6934 	store_xmsi_data(sp);
6935 
6936 	/* After proper initialization of H/W, register ISR */
6937 	if (sp->config.intr_type == MSI_X) {
6938 		int i, msix_rx_cnt = 0;
6939 
6940 		for (i = 0; i < sp->num_entries; i++) {
6941 			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6942 				if (sp->s2io_entries[i].type ==
6943 				    MSIX_RING_TYPE) {
6944 					snprintf(sp->desc[i],
6945 						sizeof(sp->desc[i]),
6946 						"%s:MSI-X-%d-RX",
6947 						dev->name, i);
6948 					err = request_irq(sp->entries[i].vector,
6949 							  s2io_msix_ring_handle,
6950 							  0,
6951 							  sp->desc[i],
6952 							  sp->s2io_entries[i].arg);
6953 				} else if (sp->s2io_entries[i].type ==
6954 					   MSIX_ALARM_TYPE) {
6955 					snprintf(sp->desc[i],
6956 						sizeof(sp->desc[i]),
6957 						"%s:MSI-X-%d-TX",
6958 						dev->name, i);
6959 					err = request_irq(sp->entries[i].vector,
6960 							  s2io_msix_fifo_handle,
6961 							  0,
6962 							  sp->desc[i],
6963 							  sp->s2io_entries[i].arg);
6964 
6965 				}
6966 				/* if either data or addr is zero print it. */
6967 				if (!(sp->msix_info[i].addr &&
6968 				      sp->msix_info[i].data)) {
6969 					DBG_PRINT(ERR_DBG,
6970 						  "%s @Addr:0x%llx Data:0x%llx\n",
6971 						  sp->desc[i],
6972 						  (unsigned long long)
6973 						  sp->msix_info[i].addr,
6974 						  (unsigned long long)
6975 						  ntohl(sp->msix_info[i].data));
6976 				} else
6977 					msix_rx_cnt++;
6978 				if (err) {
6979 					remove_msix_isr(sp);
6980 
6981 					DBG_PRINT(ERR_DBG,
6982 						  "%s:MSI-X-%d registration "
6983 						  "failed\n", dev->name, i);
6984 
6985 					DBG_PRINT(ERR_DBG,
6986 						  "%s: Defaulting to INTA\n",
6987 						  dev->name);
6988 					sp->config.intr_type = INTA;
6989 					break;
6990 				}
6991 				sp->s2io_entries[i].in_use =
6992 					MSIX_REGISTERED_SUCCESS;
6993 			}
6994 		}
6995 		if (!err) {
6996 			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
6997 			DBG_PRINT(INFO_DBG,
6998 				  "MSI-X-TX entries enabled through alarm vector\n");
6999 		}
7000 	}
7001 	if (sp->config.intr_type == INTA) {
7002 		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
7003 				  sp->name, dev);
7004 		if (err) {
7005 			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7006 				  dev->name);
7007 			return -1;
7008 		}
7009 	}
7010 	return 0;
7011 }
7012 
7013 static void s2io_rem_isr(struct s2io_nic *sp)
7014 {
7015 	if (sp->config.intr_type == MSI_X)
7016 		remove_msix_isr(sp);
7017 	else
7018 		remove_inta_isr(sp);
7019 }
7020 
7021 static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7022 {
7023 	int cnt = 0;
7024 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
7025 	register u64 val64 = 0;
7026 	struct config_param *config;
7027 	config = &sp->config;
7028 
7029 	if (!is_s2io_card_up(sp))
7030 		return;
7031 
7032 	del_timer_sync(&sp->alarm_timer);
7033 	/* If s2io_set_link task is executing, wait till it completes. */
7034 	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7035 		msleep(50);
7036 	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7037 
7038 	/* Disable napi */
7039 	if (sp->config.napi) {
7040 		int off = 0;
7041 		if (config->intr_type ==  MSI_X) {
7042 			for (; off < sp->config.rx_ring_num; off++)
7043 				napi_disable(&sp->mac_control.rings[off].napi);
7044 		}
7045 		else
7046 			napi_disable(&sp->napi);
7047 	}
7048 
7049 	/* disable Tx and Rx traffic on the NIC */
7050 	if (do_io)
7051 		stop_nic(sp);
7052 
7053 	s2io_rem_isr(sp);
7054 
7055 	/* stop the tx queue, indicate link down */
7056 	s2io_link(sp, LINK_DOWN);
7057 
7058 	/* Check if the device is Quiescent and then Reset the NIC */
7059 	while (do_io) {
7060 		/* As per the HW requirement we need to replenish the
7061 		 * receive buffer to avoid the ring bump. Since there is
7062 		 * no intention of processing the Rx frame at this pointwe are
7063 		 * just setting the ownership bit of rxd in Each Rx
7064 		 * ring to HW and set the appropriate buffer size
7065 		 * based on the ring mode
7066 		 */
7067 		rxd_owner_bit_reset(sp);
7068 
7069 		val64 = readq(&bar0->adapter_status);
7070 		if (verify_xena_quiescence(sp)) {
7071 			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7072 				break;
7073 		}
7074 
7075 		msleep(50);
7076 		cnt++;
7077 		if (cnt == 10) {
7078 			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7079 				  "adapter status reads 0x%llx\n",
7080 				  (unsigned long long)val64);
7081 			break;
7082 		}
7083 	}
7084 	if (do_io)
7085 		s2io_reset(sp);
7086 
7087 	/* Free all Tx buffers */
7088 	free_tx_buffers(sp);
7089 
7090 	/* Free all Rx buffers */
7091 	free_rx_buffers(sp);
7092 
7093 	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7094 }
7095 
7096 static void s2io_card_down(struct s2io_nic *sp)
7097 {
7098 	do_s2io_card_down(sp, 1);
7099 }
7100 
7101 static int s2io_card_up(struct s2io_nic *sp)
7102 {
7103 	int i, ret = 0;
7104 	struct config_param *config;
7105 	struct mac_info *mac_control;
7106 	struct net_device *dev = sp->dev;
7107 	u16 interruptible;
7108 
7109 	/* Initialize the H/W I/O registers */
7110 	ret = init_nic(sp);
7111 	if (ret != 0) {
7112 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7113 			  dev->name);
7114 		if (ret != -EIO)
7115 			s2io_reset(sp);
7116 		return ret;
7117 	}
7118 
7119 	/*
7120 	 * Initializing the Rx buffers. For now we are considering only 1
7121 	 * Rx ring and initializing buffers into 30 Rx blocks
7122 	 */
7123 	config = &sp->config;
7124 	mac_control = &sp->mac_control;
7125 
7126 	for (i = 0; i < config->rx_ring_num; i++) {
7127 		struct ring_info *ring = &mac_control->rings[i];
7128 
7129 		ring->mtu = dev->mtu;
7130 		ring->lro = !!(dev->features & NETIF_F_LRO);
7131 		ret = fill_rx_buffers(sp, ring, 1);
7132 		if (ret) {
7133 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7134 				  dev->name);
7135 			s2io_reset(sp);
7136 			free_rx_buffers(sp);
7137 			return -ENOMEM;
7138 		}
7139 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7140 			  ring->rx_bufs_left);
7141 	}
7142 
7143 	/* Initialise napi */
7144 	if (config->napi) {
7145 		if (config->intr_type ==  MSI_X) {
7146 			for (i = 0; i < sp->config.rx_ring_num; i++)
7147 				napi_enable(&sp->mac_control.rings[i].napi);
7148 		} else {
7149 			napi_enable(&sp->napi);
7150 		}
7151 	}
7152 
7153 	/* Maintain the state prior to the open */
7154 	if (sp->promisc_flg)
7155 		sp->promisc_flg = 0;
7156 	if (sp->m_cast_flg) {
7157 		sp->m_cast_flg = 0;
7158 		sp->all_multi_pos = 0;
7159 	}
7160 
7161 	/* Setting its receive mode */
7162 	s2io_set_multicast(dev);
7163 
7164 	if (dev->features & NETIF_F_LRO) {
7165 		/* Initialize max aggregatable pkts per session based on MTU */
7166 		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7167 		/* Check if we can use (if specified) user provided value */
7168 		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7169 			sp->lro_max_aggr_per_sess = lro_max_pkts;
7170 	}
7171 
7172 	/* Enable Rx Traffic and interrupts on the NIC */
7173 	if (start_nic(sp)) {
7174 		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7175 		s2io_reset(sp);
7176 		free_rx_buffers(sp);
7177 		return -ENODEV;
7178 	}
7179 
7180 	/* Add interrupt service routine */
7181 	if (s2io_add_isr(sp) != 0) {
7182 		if (sp->config.intr_type == MSI_X)
7183 			s2io_rem_isr(sp);
7184 		s2io_reset(sp);
7185 		free_rx_buffers(sp);
7186 		return -ENODEV;
7187 	}
7188 
7189 	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7190 
7191 	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7192 
7193 	/*  Enable select interrupts */
7194 	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7195 	if (sp->config.intr_type != INTA) {
7196 		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7197 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7198 	} else {
7199 		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7200 		interruptible |= TX_PIC_INTR;
7201 		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7202 	}
7203 
7204 	return 0;
7205 }
7206 
7207 /**
7208  * s2io_restart_nic - Resets the NIC.
7209  * @data : long pointer to the device private structure
7210  * Description:
7211  * This function is scheduled to be run by the s2io_tx_watchdog
7212  * function after 0.5 secs to reset the NIC. The idea is to reduce
7213  * the run time of the watch dog routine which is run holding a
7214  * spin lock.
7215  */
7216 
7217 static void s2io_restart_nic(struct work_struct *work)
7218 {
7219 	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7220 	struct net_device *dev = sp->dev;
7221 
7222 	rtnl_lock();
7223 
7224 	if (!netif_running(dev))
7225 		goto out_unlock;
7226 
7227 	s2io_card_down(sp);
7228 	if (s2io_card_up(sp)) {
7229 		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7230 	}
7231 	s2io_wake_all_tx_queue(sp);
7232 	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7233 out_unlock:
7234 	rtnl_unlock();
7235 }
7236 
7237 /**
7238  *  s2io_tx_watchdog - Watchdog for transmit side.
7239  *  @dev : Pointer to net device structure
7240  *  Description:
7241  *  This function is triggered if the Tx Queue is stopped
7242  *  for a pre-defined amount of time when the Interface is still up.
7243  *  If the Interface is jammed in such a situation, the hardware is
7244  *  reset (by s2io_close) and restarted again (by s2io_open) to
7245  *  overcome any problem that might have been caused in the hardware.
7246  *  Return value:
7247  *  void
7248  */
7249 
7250 static void s2io_tx_watchdog(struct net_device *dev)
7251 {
7252 	struct s2io_nic *sp = netdev_priv(dev);
7253 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7254 
7255 	if (netif_carrier_ok(dev)) {
7256 		swstats->watchdog_timer_cnt++;
7257 		schedule_work(&sp->rst_timer_task);
7258 		swstats->soft_reset_cnt++;
7259 	}
7260 }
7261 
7262 /**
7263  *   rx_osm_handler - To perform some OS related operations on SKB.
7264  *   @sp: private member of the device structure,pointer to s2io_nic structure.
7265  *   @skb : the socket buffer pointer.
7266  *   @len : length of the packet
7267  *   @cksum : FCS checksum of the frame.
7268  *   @ring_no : the ring from which this RxD was extracted.
7269  *   Description:
7270  *   This function is called by the Rx interrupt serivce routine to perform
7271  *   some OS related operations on the SKB before passing it to the upper
7272  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7273  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7274  *   to the upper layer. If the checksum is wrong, it increments the Rx
7275  *   packet error count, frees the SKB and returns error.
7276  *   Return value:
7277  *   SUCCESS on success and -1 on failure.
7278  */
7279 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7280 {
7281 	struct s2io_nic *sp = ring_data->nic;
7282 	struct net_device *dev = ring_data->dev;
7283 	struct sk_buff *skb = (struct sk_buff *)
7284 		((unsigned long)rxdp->Host_Control);
7285 	int ring_no = ring_data->ring_no;
7286 	u16 l3_csum, l4_csum;
7287 	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7288 	struct lro *uninitialized_var(lro);
7289 	u8 err_mask;
7290 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7291 
7292 	skb->dev = dev;
7293 
7294 	if (err) {
7295 		/* Check for parity error */
7296 		if (err & 0x1)
7297 			swstats->parity_err_cnt++;
7298 
7299 		err_mask = err >> 48;
7300 		switch (err_mask) {
7301 		case 1:
7302 			swstats->rx_parity_err_cnt++;
7303 			break;
7304 
7305 		case 2:
7306 			swstats->rx_abort_cnt++;
7307 			break;
7308 
7309 		case 3:
7310 			swstats->rx_parity_abort_cnt++;
7311 			break;
7312 
7313 		case 4:
7314 			swstats->rx_rda_fail_cnt++;
7315 			break;
7316 
7317 		case 5:
7318 			swstats->rx_unkn_prot_cnt++;
7319 			break;
7320 
7321 		case 6:
7322 			swstats->rx_fcs_err_cnt++;
7323 			break;
7324 
7325 		case 7:
7326 			swstats->rx_buf_size_err_cnt++;
7327 			break;
7328 
7329 		case 8:
7330 			swstats->rx_rxd_corrupt_cnt++;
7331 			break;
7332 
7333 		case 15:
7334 			swstats->rx_unkn_err_cnt++;
7335 			break;
7336 		}
7337 		/*
7338 		 * Drop the packet if bad transfer code. Exception being
7339 		 * 0x5, which could be due to unsupported IPv6 extension header.
7340 		 * In this case, we let stack handle the packet.
7341 		 * Note that in this case, since checksum will be incorrect,
7342 		 * stack will validate the same.
7343 		 */
7344 		if (err_mask != 0x5) {
7345 			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7346 				  dev->name, err_mask);
7347 			dev->stats.rx_crc_errors++;
7348 			swstats->mem_freed
7349 				+= skb->truesize;
7350 			dev_kfree_skb(skb);
7351 			ring_data->rx_bufs_left -= 1;
7352 			rxdp->Host_Control = 0;
7353 			return 0;
7354 		}
7355 	}
7356 
7357 	rxdp->Host_Control = 0;
7358 	if (sp->rxd_mode == RXD_MODE_1) {
7359 		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7360 
7361 		skb_put(skb, len);
7362 	} else if (sp->rxd_mode == RXD_MODE_3B) {
7363 		int get_block = ring_data->rx_curr_get_info.block_index;
7364 		int get_off = ring_data->rx_curr_get_info.offset;
7365 		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7366 		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7367 		unsigned char *buff = skb_push(skb, buf0_len);
7368 
7369 		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7370 		memcpy(buff, ba->ba_0, buf0_len);
7371 		skb_put(skb, buf2_len);
7372 	}
7373 
7374 	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7375 	    ((!ring_data->lro) ||
7376 	     (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
7377 	    (dev->features & NETIF_F_RXCSUM)) {
7378 		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7379 		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7380 		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7381 			/*
7382 			 * NIC verifies if the Checksum of the received
7383 			 * frame is Ok or not and accordingly returns
7384 			 * a flag in the RxD.
7385 			 */
7386 			skb->ip_summed = CHECKSUM_UNNECESSARY;
7387 			if (ring_data->lro) {
7388 				u32 tcp_len = 0;
7389 				u8 *tcp;
7390 				int ret = 0;
7391 
7392 				ret = s2io_club_tcp_session(ring_data,
7393 							    skb->data, &tcp,
7394 							    &tcp_len, &lro,
7395 							    rxdp, sp);
7396 				switch (ret) {
7397 				case 3: /* Begin anew */
7398 					lro->parent = skb;
7399 					goto aggregate;
7400 				case 1: /* Aggregate */
7401 					lro_append_pkt(sp, lro, skb, tcp_len);
7402 					goto aggregate;
7403 				case 4: /* Flush session */
7404 					lro_append_pkt(sp, lro, skb, tcp_len);
7405 					queue_rx_frame(lro->parent,
7406 						       lro->vlan_tag);
7407 					clear_lro_session(lro);
7408 					swstats->flush_max_pkts++;
7409 					goto aggregate;
7410 				case 2: /* Flush both */
7411 					lro->parent->data_len = lro->frags_len;
7412 					swstats->sending_both++;
7413 					queue_rx_frame(lro->parent,
7414 						       lro->vlan_tag);
7415 					clear_lro_session(lro);
7416 					goto send_up;
7417 				case 0: /* sessions exceeded */
7418 				case -1: /* non-TCP or not L2 aggregatable */
7419 				case 5: /*
7420 					 * First pkt in session not
7421 					 * L3/L4 aggregatable
7422 					 */
7423 					break;
7424 				default:
7425 					DBG_PRINT(ERR_DBG,
7426 						  "%s: Samadhana!!\n",
7427 						  __func__);
7428 					BUG();
7429 				}
7430 			}
7431 		} else {
7432 			/*
7433 			 * Packet with erroneous checksum, let the
7434 			 * upper layers deal with it.
7435 			 */
7436 			skb_checksum_none_assert(skb);
7437 		}
7438 	} else
7439 		skb_checksum_none_assert(skb);
7440 
7441 	swstats->mem_freed += skb->truesize;
7442 send_up:
7443 	skb_record_rx_queue(skb, ring_no);
7444 	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7445 aggregate:
7446 	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7447 	return SUCCESS;
7448 }
7449 
7450 /**
7451  *  s2io_link - stops/starts the Tx queue.
7452  *  @sp : private member of the device structure, which is a pointer to the
7453  *  s2io_nic structure.
7454  *  @link : inidicates whether link is UP/DOWN.
7455  *  Description:
7456  *  This function stops/starts the Tx queue depending on whether the link
7457  *  status of the NIC is is down or up. This is called by the Alarm
7458  *  interrupt handler whenever a link change interrupt comes up.
7459  *  Return value:
7460  *  void.
7461  */
7462 
7463 static void s2io_link(struct s2io_nic *sp, int link)
7464 {
7465 	struct net_device *dev = sp->dev;
7466 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7467 
7468 	if (link != sp->last_link_state) {
7469 		init_tti(sp, link);
7470 		if (link == LINK_DOWN) {
7471 			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7472 			s2io_stop_all_tx_queue(sp);
7473 			netif_carrier_off(dev);
7474 			if (swstats->link_up_cnt)
7475 				swstats->link_up_time =
7476 					jiffies - sp->start_time;
7477 			swstats->link_down_cnt++;
7478 		} else {
7479 			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7480 			if (swstats->link_down_cnt)
7481 				swstats->link_down_time =
7482 					jiffies - sp->start_time;
7483 			swstats->link_up_cnt++;
7484 			netif_carrier_on(dev);
7485 			s2io_wake_all_tx_queue(sp);
7486 		}
7487 	}
7488 	sp->last_link_state = link;
7489 	sp->start_time = jiffies;
7490 }
7491 
7492 /**
7493  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7494  *  @sp : private member of the device structure, which is a pointer to the
7495  *  s2io_nic structure.
7496  *  Description:
7497  *  This function initializes a few of the PCI and PCI-X configuration registers
7498  *  with recommended values.
7499  *  Return value:
7500  *  void
7501  */
7502 
7503 static void s2io_init_pci(struct s2io_nic *sp)
7504 {
7505 	u16 pci_cmd = 0, pcix_cmd = 0;
7506 
7507 	/* Enable Data Parity Error Recovery in PCI-X command register. */
7508 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7509 			     &(pcix_cmd));
7510 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7511 			      (pcix_cmd | 1));
7512 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7513 			     &(pcix_cmd));
7514 
7515 	/* Set the PErr Response bit in PCI command register. */
7516 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7517 	pci_write_config_word(sp->pdev, PCI_COMMAND,
7518 			      (pci_cmd | PCI_COMMAND_PARITY));
7519 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7520 }
7521 
7522 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7523 			    u8 *dev_multiq)
7524 {
7525 	int i;
7526 
7527 	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7528 		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7529 			  "(%d) not supported\n", tx_fifo_num);
7530 
7531 		if (tx_fifo_num < 1)
7532 			tx_fifo_num = 1;
7533 		else
7534 			tx_fifo_num = MAX_TX_FIFOS;
7535 
7536 		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7537 	}
7538 
7539 	if (multiq)
7540 		*dev_multiq = multiq;
7541 
7542 	if (tx_steering_type && (1 == tx_fifo_num)) {
7543 		if (tx_steering_type != TX_DEFAULT_STEERING)
7544 			DBG_PRINT(ERR_DBG,
7545 				  "Tx steering is not supported with "
7546 				  "one fifo. Disabling Tx steering.\n");
7547 		tx_steering_type = NO_STEERING;
7548 	}
7549 
7550 	if ((tx_steering_type < NO_STEERING) ||
7551 	    (tx_steering_type > TX_DEFAULT_STEERING)) {
7552 		DBG_PRINT(ERR_DBG,
7553 			  "Requested transmit steering not supported\n");
7554 		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7555 		tx_steering_type = NO_STEERING;
7556 	}
7557 
7558 	if (rx_ring_num > MAX_RX_RINGS) {
7559 		DBG_PRINT(ERR_DBG,
7560 			  "Requested number of rx rings not supported\n");
7561 		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7562 			  MAX_RX_RINGS);
7563 		rx_ring_num = MAX_RX_RINGS;
7564 	}
7565 
7566 	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7567 		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7568 			  "Defaulting to INTA\n");
7569 		*dev_intr_type = INTA;
7570 	}
7571 
7572 	if ((*dev_intr_type == MSI_X) &&
7573 	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7574 	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7575 		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7576 			  "Defaulting to INTA\n");
7577 		*dev_intr_type = INTA;
7578 	}
7579 
7580 	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7581 		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7582 		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7583 		rx_ring_mode = 1;
7584 	}
7585 
7586 	for (i = 0; i < MAX_RX_RINGS; i++)
7587 		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7588 			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7589 				  "supported\nDefaulting to %d\n",
7590 				  MAX_RX_BLOCKS_PER_RING);
7591 			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7592 		}
7593 
7594 	return SUCCESS;
7595 }
7596 
7597 /**
7598  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7599  * or Traffic class respectively.
7600  * @nic: device private variable
7601  * Description: The function configures the receive steering to
7602  * desired receive ring.
7603  * Return Value:  SUCCESS on success and
7604  * '-1' on failure (endian settings incorrect).
7605  */
7606 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7607 {
7608 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
7609 	register u64 val64 = 0;
7610 
7611 	if (ds_codepoint > 63)
7612 		return FAILURE;
7613 
7614 	val64 = RTS_DS_MEM_DATA(ring);
7615 	writeq(val64, &bar0->rts_ds_mem_data);
7616 
7617 	val64 = RTS_DS_MEM_CTRL_WE |
7618 		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7619 		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7620 
7621 	writeq(val64, &bar0->rts_ds_mem_ctrl);
7622 
7623 	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7624 				     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7625 				     S2IO_BIT_RESET);
7626 }
7627 
7628 static const struct net_device_ops s2io_netdev_ops = {
7629 	.ndo_open	        = s2io_open,
7630 	.ndo_stop	        = s2io_close,
7631 	.ndo_get_stats	        = s2io_get_stats,
7632 	.ndo_start_xmit    	= s2io_xmit,
7633 	.ndo_validate_addr	= eth_validate_addr,
7634 	.ndo_set_rx_mode	= s2io_set_multicast,
7635 	.ndo_do_ioctl	   	= s2io_ioctl,
7636 	.ndo_set_mac_address    = s2io_set_mac_addr,
7637 	.ndo_change_mtu	   	= s2io_change_mtu,
7638 	.ndo_set_features	= s2io_set_features,
7639 	.ndo_tx_timeout	   	= s2io_tx_watchdog,
7640 #ifdef CONFIG_NET_POLL_CONTROLLER
7641 	.ndo_poll_controller    = s2io_netpoll,
7642 #endif
7643 };
7644 
7645 /**
7646  *  s2io_init_nic - Initialization of the adapter .
7647  *  @pdev : structure containing the PCI related information of the device.
7648  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7649  *  Description:
7650  *  The function initializes an adapter identified by the pci_dec structure.
7651  *  All OS related initialization including memory and device structure and
7652  *  initlaization of the device private variable is done. Also the swapper
7653  *  control register is initialized to enable read and write into the I/O
7654  *  registers of the device.
7655  *  Return value:
7656  *  returns 0 on success and negative on failure.
7657  */
7658 
7659 static int
7660 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7661 {
7662 	struct s2io_nic *sp;
7663 	struct net_device *dev;
7664 	int i, j, ret;
7665 	int dma_flag = false;
7666 	u32 mac_up, mac_down;
7667 	u64 val64 = 0, tmp64 = 0;
7668 	struct XENA_dev_config __iomem *bar0 = NULL;
7669 	u16 subid;
7670 	struct config_param *config;
7671 	struct mac_info *mac_control;
7672 	int mode;
7673 	u8 dev_intr_type = intr_type;
7674 	u8 dev_multiq = 0;
7675 
7676 	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7677 	if (ret)
7678 		return ret;
7679 
7680 	ret = pci_enable_device(pdev);
7681 	if (ret) {
7682 		DBG_PRINT(ERR_DBG,
7683 			  "%s: pci_enable_device failed\n", __func__);
7684 		return ret;
7685 	}
7686 
7687 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7688 		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7689 		dma_flag = true;
7690 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
7691 			DBG_PRINT(ERR_DBG,
7692 				  "Unable to obtain 64bit DMA "
7693 				  "for consistent allocations\n");
7694 			pci_disable_device(pdev);
7695 			return -ENOMEM;
7696 		}
7697 	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
7698 		DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7699 	} else {
7700 		pci_disable_device(pdev);
7701 		return -ENOMEM;
7702 	}
7703 	ret = pci_request_regions(pdev, s2io_driver_name);
7704 	if (ret) {
7705 		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7706 			  __func__, ret);
7707 		pci_disable_device(pdev);
7708 		return -ENODEV;
7709 	}
7710 	if (dev_multiq)
7711 		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7712 	else
7713 		dev = alloc_etherdev(sizeof(struct s2io_nic));
7714 	if (dev == NULL) {
7715 		pci_disable_device(pdev);
7716 		pci_release_regions(pdev);
7717 		return -ENODEV;
7718 	}
7719 
7720 	pci_set_master(pdev);
7721 	pci_set_drvdata(pdev, dev);
7722 	SET_NETDEV_DEV(dev, &pdev->dev);
7723 
7724 	/*  Private member variable initialized to s2io NIC structure */
7725 	sp = netdev_priv(dev);
7726 	sp->dev = dev;
7727 	sp->pdev = pdev;
7728 	sp->high_dma_flag = dma_flag;
7729 	sp->device_enabled_once = false;
7730 	if (rx_ring_mode == 1)
7731 		sp->rxd_mode = RXD_MODE_1;
7732 	if (rx_ring_mode == 2)
7733 		sp->rxd_mode = RXD_MODE_3B;
7734 
7735 	sp->config.intr_type = dev_intr_type;
7736 
7737 	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7738 	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7739 		sp->device_type = XFRAME_II_DEVICE;
7740 	else
7741 		sp->device_type = XFRAME_I_DEVICE;
7742 
7743 
7744 	/* Initialize some PCI/PCI-X fields of the NIC. */
7745 	s2io_init_pci(sp);
7746 
7747 	/*
7748 	 * Setting the device configuration parameters.
7749 	 * Most of these parameters can be specified by the user during
7750 	 * module insertion as they are module loadable parameters. If
7751 	 * these parameters are not not specified during load time, they
7752 	 * are initialized with default values.
7753 	 */
7754 	config = &sp->config;
7755 	mac_control = &sp->mac_control;
7756 
7757 	config->napi = napi;
7758 	config->tx_steering_type = tx_steering_type;
7759 
7760 	/* Tx side parameters. */
7761 	if (config->tx_steering_type == TX_PRIORITY_STEERING)
7762 		config->tx_fifo_num = MAX_TX_FIFOS;
7763 	else
7764 		config->tx_fifo_num = tx_fifo_num;
7765 
7766 	/* Initialize the fifos used for tx steering */
7767 	if (config->tx_fifo_num < 5) {
7768 		if (config->tx_fifo_num  == 1)
7769 			sp->total_tcp_fifos = 1;
7770 		else
7771 			sp->total_tcp_fifos = config->tx_fifo_num - 1;
7772 		sp->udp_fifo_idx = config->tx_fifo_num - 1;
7773 		sp->total_udp_fifos = 1;
7774 		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7775 	} else {
7776 		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7777 				       FIFO_OTHER_MAX_NUM);
7778 		sp->udp_fifo_idx = sp->total_tcp_fifos;
7779 		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7780 		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7781 	}
7782 
7783 	config->multiq = dev_multiq;
7784 	for (i = 0; i < config->tx_fifo_num; i++) {
7785 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7786 
7787 		tx_cfg->fifo_len = tx_fifo_len[i];
7788 		tx_cfg->fifo_priority = i;
7789 	}
7790 
7791 	/* mapping the QoS priority to the configured fifos */
7792 	for (i = 0; i < MAX_TX_FIFOS; i++)
7793 		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7794 
7795 	/* map the hashing selector table to the configured fifos */
7796 	for (i = 0; i < config->tx_fifo_num; i++)
7797 		sp->fifo_selector[i] = fifo_selector[i];
7798 
7799 
7800 	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7801 	for (i = 0; i < config->tx_fifo_num; i++) {
7802 		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7803 
7804 		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7805 		if (tx_cfg->fifo_len < 65) {
7806 			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7807 			break;
7808 		}
7809 	}
7810 	/* + 2 because one Txd for skb->data and one Txd for UFO */
7811 	config->max_txds = MAX_SKB_FRAGS + 2;
7812 
7813 	/* Rx side parameters. */
7814 	config->rx_ring_num = rx_ring_num;
7815 	for (i = 0; i < config->rx_ring_num; i++) {
7816 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7817 		struct ring_info *ring = &mac_control->rings[i];
7818 
7819 		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7820 		rx_cfg->ring_priority = i;
7821 		ring->rx_bufs_left = 0;
7822 		ring->rxd_mode = sp->rxd_mode;
7823 		ring->rxd_count = rxd_count[sp->rxd_mode];
7824 		ring->pdev = sp->pdev;
7825 		ring->dev = sp->dev;
7826 	}
7827 
7828 	for (i = 0; i < rx_ring_num; i++) {
7829 		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7830 
7831 		rx_cfg->ring_org = RING_ORG_BUFF1;
7832 		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7833 	}
7834 
7835 	/*  Setting Mac Control parameters */
7836 	mac_control->rmac_pause_time = rmac_pause_time;
7837 	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7838 	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7839 
7840 
7841 	/*  initialize the shared memory used by the NIC and the host */
7842 	if (init_shared_mem(sp)) {
7843 		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7844 		ret = -ENOMEM;
7845 		goto mem_alloc_failed;
7846 	}
7847 
7848 	sp->bar0 = pci_ioremap_bar(pdev, 0);
7849 	if (!sp->bar0) {
7850 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7851 			  dev->name);
7852 		ret = -ENOMEM;
7853 		goto bar0_remap_failed;
7854 	}
7855 
7856 	sp->bar1 = pci_ioremap_bar(pdev, 2);
7857 	if (!sp->bar1) {
7858 		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7859 			  dev->name);
7860 		ret = -ENOMEM;
7861 		goto bar1_remap_failed;
7862 	}
7863 
7864 	/* Initializing the BAR1 address as the start of the FIFO pointer. */
7865 	for (j = 0; j < MAX_TX_FIFOS; j++) {
7866 		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7867 	}
7868 
7869 	/*  Driver entry points */
7870 	dev->netdev_ops = &s2io_netdev_ops;
7871 	dev->ethtool_ops = &netdev_ethtool_ops;
7872 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7873 		NETIF_F_TSO | NETIF_F_TSO6 |
7874 		NETIF_F_RXCSUM | NETIF_F_LRO;
7875 	dev->features |= dev->hw_features |
7876 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
7877 	if (sp->high_dma_flag == true)
7878 		dev->features |= NETIF_F_HIGHDMA;
7879 	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7880 	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7881 	INIT_WORK(&sp->set_link_task, s2io_set_link);
7882 
7883 	pci_save_state(sp->pdev);
7884 
7885 	/* Setting swapper control on the NIC, for proper reset operation */
7886 	if (s2io_set_swapper(sp)) {
7887 		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7888 			  dev->name);
7889 		ret = -EAGAIN;
7890 		goto set_swap_failed;
7891 	}
7892 
7893 	/* Verify if the Herc works on the slot its placed into */
7894 	if (sp->device_type & XFRAME_II_DEVICE) {
7895 		mode = s2io_verify_pci_mode(sp);
7896 		if (mode < 0) {
7897 			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7898 				  __func__);
7899 			ret = -EBADSLT;
7900 			goto set_swap_failed;
7901 		}
7902 	}
7903 
7904 	if (sp->config.intr_type == MSI_X) {
7905 		sp->num_entries = config->rx_ring_num + 1;
7906 		ret = s2io_enable_msi_x(sp);
7907 
7908 		if (!ret) {
7909 			ret = s2io_test_msi(sp);
7910 			/* rollback MSI-X, will re-enable during add_isr() */
7911 			remove_msix_isr(sp);
7912 		}
7913 		if (ret) {
7914 
7915 			DBG_PRINT(ERR_DBG,
7916 				  "MSI-X requested but failed to enable\n");
7917 			sp->config.intr_type = INTA;
7918 		}
7919 	}
7920 
7921 	if (config->intr_type ==  MSI_X) {
7922 		for (i = 0; i < config->rx_ring_num ; i++) {
7923 			struct ring_info *ring = &mac_control->rings[i];
7924 
7925 			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7926 		}
7927 	} else {
7928 		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7929 	}
7930 
7931 	/* Not needed for Herc */
7932 	if (sp->device_type & XFRAME_I_DEVICE) {
7933 		/*
7934 		 * Fix for all "FFs" MAC address problems observed on
7935 		 * Alpha platforms
7936 		 */
7937 		fix_mac_address(sp);
7938 		s2io_reset(sp);
7939 	}
7940 
7941 	/*
7942 	 * MAC address initialization.
7943 	 * For now only one mac address will be read and used.
7944 	 */
7945 	bar0 = sp->bar0;
7946 	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7947 		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7948 	writeq(val64, &bar0->rmac_addr_cmd_mem);
7949 	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7950 			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
7951 			      S2IO_BIT_RESET);
7952 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
7953 	mac_down = (u32)tmp64;
7954 	mac_up = (u32) (tmp64 >> 32);
7955 
7956 	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7957 	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7958 	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7959 	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7960 	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7961 	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7962 
7963 	/*  Set the factory defined MAC address initially   */
7964 	dev->addr_len = ETH_ALEN;
7965 	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7966 
7967 	/* initialize number of multicast & unicast MAC entries variables */
7968 	if (sp->device_type == XFRAME_I_DEVICE) {
7969 		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7970 		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7971 		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7972 	} else if (sp->device_type == XFRAME_II_DEVICE) {
7973 		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7974 		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7975 		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7976 	}
7977 
7978 	/* MTU range: 46 - 9600 */
7979 	dev->min_mtu = MIN_MTU;
7980 	dev->max_mtu = S2IO_JUMBO_SIZE;
7981 
7982 	/* store mac addresses from CAM to s2io_nic structure */
7983 	do_s2io_store_unicast_mc(sp);
7984 
7985 	/* Configure MSIX vector for number of rings configured plus one */
7986 	if ((sp->device_type == XFRAME_II_DEVICE) &&
7987 	    (config->intr_type == MSI_X))
7988 		sp->num_entries = config->rx_ring_num + 1;
7989 
7990 	/* Store the values of the MSIX table in the s2io_nic structure */
7991 	store_xmsi_data(sp);
7992 	/* reset Nic and bring it to known state */
7993 	s2io_reset(sp);
7994 
7995 	/*
7996 	 * Initialize link state flags
7997 	 * and the card state parameter
7998 	 */
7999 	sp->state = 0;
8000 
8001 	/* Initialize spinlocks */
8002 	for (i = 0; i < sp->config.tx_fifo_num; i++) {
8003 		struct fifo_info *fifo = &mac_control->fifos[i];
8004 
8005 		spin_lock_init(&fifo->tx_lock);
8006 	}
8007 
8008 	/*
8009 	 * SXE-002: Configure link and activity LED to init state
8010 	 * on driver load.
8011 	 */
8012 	subid = sp->pdev->subsystem_device;
8013 	if ((subid & 0xFF) >= 0x07) {
8014 		val64 = readq(&bar0->gpio_control);
8015 		val64 |= 0x0000800000000000ULL;
8016 		writeq(val64, &bar0->gpio_control);
8017 		val64 = 0x0411040400000000ULL;
8018 		writeq(val64, (void __iomem *)bar0 + 0x2700);
8019 		val64 = readq(&bar0->gpio_control);
8020 	}
8021 
8022 	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
8023 
8024 	if (register_netdev(dev)) {
8025 		DBG_PRINT(ERR_DBG, "Device registration failed\n");
8026 		ret = -ENODEV;
8027 		goto register_failed;
8028 	}
8029 	s2io_vpd_read(sp);
8030 	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8031 	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8032 		  sp->product_name, pdev->revision);
8033 	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8034 		  s2io_driver_version);
8035 	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8036 	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8037 	if (sp->device_type & XFRAME_II_DEVICE) {
8038 		mode = s2io_print_pci_mode(sp);
8039 		if (mode < 0) {
8040 			ret = -EBADSLT;
8041 			unregister_netdev(dev);
8042 			goto set_swap_failed;
8043 		}
8044 	}
8045 	switch (sp->rxd_mode) {
8046 	case RXD_MODE_1:
8047 		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8048 			  dev->name);
8049 		break;
8050 	case RXD_MODE_3B:
8051 		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8052 			  dev->name);
8053 		break;
8054 	}
8055 
8056 	switch (sp->config.napi) {
8057 	case 0:
8058 		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8059 		break;
8060 	case 1:
8061 		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8062 		break;
8063 	}
8064 
8065 	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8066 		  sp->config.tx_fifo_num);
8067 
8068 	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8069 		  sp->config.rx_ring_num);
8070 
8071 	switch (sp->config.intr_type) {
8072 	case INTA:
8073 		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8074 		break;
8075 	case MSI_X:
8076 		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8077 		break;
8078 	}
8079 	if (sp->config.multiq) {
8080 		for (i = 0; i < sp->config.tx_fifo_num; i++) {
8081 			struct fifo_info *fifo = &mac_control->fifos[i];
8082 
8083 			fifo->multiq = config->multiq;
8084 		}
8085 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8086 			  dev->name);
8087 	} else
8088 		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8089 			  dev->name);
8090 
8091 	switch (sp->config.tx_steering_type) {
8092 	case NO_STEERING:
8093 		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8094 			  dev->name);
8095 		break;
8096 	case TX_PRIORITY_STEERING:
8097 		DBG_PRINT(ERR_DBG,
8098 			  "%s: Priority steering enabled for transmit\n",
8099 			  dev->name);
8100 		break;
8101 	case TX_DEFAULT_STEERING:
8102 		DBG_PRINT(ERR_DBG,
8103 			  "%s: Default steering enabled for transmit\n",
8104 			  dev->name);
8105 	}
8106 
8107 	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8108 		  dev->name);
8109 	/* Initialize device name */
8110 	snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8111 		 sp->product_name);
8112 
8113 	if (vlan_tag_strip)
8114 		sp->vlan_strip_flag = 1;
8115 	else
8116 		sp->vlan_strip_flag = 0;
8117 
8118 	/*
8119 	 * Make Link state as off at this point, when the Link change
8120 	 * interrupt comes the state will be automatically changed to
8121 	 * the right state.
8122 	 */
8123 	netif_carrier_off(dev);
8124 
8125 	return 0;
8126 
8127 register_failed:
8128 set_swap_failed:
8129 	iounmap(sp->bar1);
8130 bar1_remap_failed:
8131 	iounmap(sp->bar0);
8132 bar0_remap_failed:
8133 mem_alloc_failed:
8134 	free_shared_mem(sp);
8135 	pci_disable_device(pdev);
8136 	pci_release_regions(pdev);
8137 	free_netdev(dev);
8138 
8139 	return ret;
8140 }
8141 
8142 /**
8143  * s2io_rem_nic - Free the PCI device
8144  * @pdev: structure containing the PCI related information of the device.
8145  * Description: This function is called by the Pci subsystem to release a
8146  * PCI device and free up all resource held up by the device. This could
8147  * be in response to a Hot plug event or when the driver is to be removed
8148  * from memory.
8149  */
8150 
8151 static void s2io_rem_nic(struct pci_dev *pdev)
8152 {
8153 	struct net_device *dev = pci_get_drvdata(pdev);
8154 	struct s2io_nic *sp;
8155 
8156 	if (dev == NULL) {
8157 		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8158 		return;
8159 	}
8160 
8161 	sp = netdev_priv(dev);
8162 
8163 	cancel_work_sync(&sp->rst_timer_task);
8164 	cancel_work_sync(&sp->set_link_task);
8165 
8166 	unregister_netdev(dev);
8167 
8168 	free_shared_mem(sp);
8169 	iounmap(sp->bar0);
8170 	iounmap(sp->bar1);
8171 	pci_release_regions(pdev);
8172 	free_netdev(dev);
8173 	pci_disable_device(pdev);
8174 }
8175 
8176 module_pci_driver(s2io_driver);
8177 
8178 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8179 				struct tcphdr **tcp, struct RxD_t *rxdp,
8180 				struct s2io_nic *sp)
8181 {
8182 	int ip_off;
8183 	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8184 
8185 	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8186 		DBG_PRINT(INIT_DBG,
8187 			  "%s: Non-TCP frames not supported for LRO\n",
8188 			  __func__);
8189 		return -1;
8190 	}
8191 
8192 	/* Checking for DIX type or DIX type with VLAN */
8193 	if ((l2_type == 0) || (l2_type == 4)) {
8194 		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8195 		/*
8196 		 * If vlan stripping is disabled and the frame is VLAN tagged,
8197 		 * shift the offset by the VLAN header size bytes.
8198 		 */
8199 		if ((!sp->vlan_strip_flag) &&
8200 		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8201 			ip_off += HEADER_VLAN_SIZE;
8202 	} else {
8203 		/* LLC, SNAP etc are considered non-mergeable */
8204 		return -1;
8205 	}
8206 
8207 	*ip = (struct iphdr *)(buffer + ip_off);
8208 	ip_len = (u8)((*ip)->ihl);
8209 	ip_len <<= 2;
8210 	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8211 
8212 	return 0;
8213 }
8214 
8215 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8216 				  struct tcphdr *tcp)
8217 {
8218 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8219 	if ((lro->iph->saddr != ip->saddr) ||
8220 	    (lro->iph->daddr != ip->daddr) ||
8221 	    (lro->tcph->source != tcp->source) ||
8222 	    (lro->tcph->dest != tcp->dest))
8223 		return -1;
8224 	return 0;
8225 }
8226 
8227 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8228 {
8229 	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8230 }
8231 
8232 static void initiate_new_session(struct lro *lro, u8 *l2h,
8233 				 struct iphdr *ip, struct tcphdr *tcp,
8234 				 u32 tcp_pyld_len, u16 vlan_tag)
8235 {
8236 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8237 	lro->l2h = l2h;
8238 	lro->iph = ip;
8239 	lro->tcph = tcp;
8240 	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8241 	lro->tcp_ack = tcp->ack_seq;
8242 	lro->sg_num = 1;
8243 	lro->total_len = ntohs(ip->tot_len);
8244 	lro->frags_len = 0;
8245 	lro->vlan_tag = vlan_tag;
8246 	/*
8247 	 * Check if we saw TCP timestamp.
8248 	 * Other consistency checks have already been done.
8249 	 */
8250 	if (tcp->doff == 8) {
8251 		__be32 *ptr;
8252 		ptr = (__be32 *)(tcp+1);
8253 		lro->saw_ts = 1;
8254 		lro->cur_tsval = ntohl(*(ptr+1));
8255 		lro->cur_tsecr = *(ptr+2);
8256 	}
8257 	lro->in_use = 1;
8258 }
8259 
8260 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8261 {
8262 	struct iphdr *ip = lro->iph;
8263 	struct tcphdr *tcp = lro->tcph;
8264 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8265 
8266 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8267 
8268 	/* Update L3 header */
8269 	csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8270 	ip->tot_len = htons(lro->total_len);
8271 
8272 	/* Update L4 header */
8273 	tcp->ack_seq = lro->tcp_ack;
8274 	tcp->window = lro->window;
8275 
8276 	/* Update tsecr field if this session has timestamps enabled */
8277 	if (lro->saw_ts) {
8278 		__be32 *ptr = (__be32 *)(tcp + 1);
8279 		*(ptr+2) = lro->cur_tsecr;
8280 	}
8281 
8282 	/* Update counters required for calculation of
8283 	 * average no. of packets aggregated.
8284 	 */
8285 	swstats->sum_avg_pkts_aggregated += lro->sg_num;
8286 	swstats->num_aggregations++;
8287 }
8288 
8289 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8290 			     struct tcphdr *tcp, u32 l4_pyld)
8291 {
8292 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8293 	lro->total_len += l4_pyld;
8294 	lro->frags_len += l4_pyld;
8295 	lro->tcp_next_seq += l4_pyld;
8296 	lro->sg_num++;
8297 
8298 	/* Update ack seq no. and window ad(from this pkt) in LRO object */
8299 	lro->tcp_ack = tcp->ack_seq;
8300 	lro->window = tcp->window;
8301 
8302 	if (lro->saw_ts) {
8303 		__be32 *ptr;
8304 		/* Update tsecr and tsval from this packet */
8305 		ptr = (__be32 *)(tcp+1);
8306 		lro->cur_tsval = ntohl(*(ptr+1));
8307 		lro->cur_tsecr = *(ptr + 2);
8308 	}
8309 }
8310 
8311 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8312 				    struct tcphdr *tcp, u32 tcp_pyld_len)
8313 {
8314 	u8 *ptr;
8315 
8316 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8317 
8318 	if (!tcp_pyld_len) {
8319 		/* Runt frame or a pure ack */
8320 		return -1;
8321 	}
8322 
8323 	if (ip->ihl != 5) /* IP has options */
8324 		return -1;
8325 
8326 	/* If we see CE codepoint in IP header, packet is not mergeable */
8327 	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8328 		return -1;
8329 
8330 	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8331 	if (tcp->urg || tcp->psh || tcp->rst ||
8332 	    tcp->syn || tcp->fin ||
8333 	    tcp->ece || tcp->cwr || !tcp->ack) {
8334 		/*
8335 		 * Currently recognize only the ack control word and
8336 		 * any other control field being set would result in
8337 		 * flushing the LRO session
8338 		 */
8339 		return -1;
8340 	}
8341 
8342 	/*
8343 	 * Allow only one TCP timestamp option. Don't aggregate if
8344 	 * any other options are detected.
8345 	 */
8346 	if (tcp->doff != 5 && tcp->doff != 8)
8347 		return -1;
8348 
8349 	if (tcp->doff == 8) {
8350 		ptr = (u8 *)(tcp + 1);
8351 		while (*ptr == TCPOPT_NOP)
8352 			ptr++;
8353 		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8354 			return -1;
8355 
8356 		/* Ensure timestamp value increases monotonically */
8357 		if (l_lro)
8358 			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8359 				return -1;
8360 
8361 		/* timestamp echo reply should be non-zero */
8362 		if (*((__be32 *)(ptr+6)) == 0)
8363 			return -1;
8364 	}
8365 
8366 	return 0;
8367 }
8368 
8369 static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
8370 				 u8 **tcp, u32 *tcp_len, struct lro **lro,
8371 				 struct RxD_t *rxdp, struct s2io_nic *sp)
8372 {
8373 	struct iphdr *ip;
8374 	struct tcphdr *tcph;
8375 	int ret = 0, i;
8376 	u16 vlan_tag = 0;
8377 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8378 
8379 	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8380 				   rxdp, sp);
8381 	if (ret)
8382 		return ret;
8383 
8384 	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
8385 
8386 	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8387 	tcph = (struct tcphdr *)*tcp;
8388 	*tcp_len = get_l4_pyld_length(ip, tcph);
8389 	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8390 		struct lro *l_lro = &ring_data->lro0_n[i];
8391 		if (l_lro->in_use) {
8392 			if (check_for_socket_match(l_lro, ip, tcph))
8393 				continue;
8394 			/* Sock pair matched */
8395 			*lro = l_lro;
8396 
8397 			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8398 				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
8399 					  "expected 0x%x, actual 0x%x\n",
8400 					  __func__,
8401 					  (*lro)->tcp_next_seq,
8402 					  ntohl(tcph->seq));
8403 
8404 				swstats->outof_sequence_pkts++;
8405 				ret = 2;
8406 				break;
8407 			}
8408 
8409 			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8410 						      *tcp_len))
8411 				ret = 1; /* Aggregate */
8412 			else
8413 				ret = 2; /* Flush both */
8414 			break;
8415 		}
8416 	}
8417 
8418 	if (ret == 0) {
8419 		/* Before searching for available LRO objects,
8420 		 * check if the pkt is L3/L4 aggregatable. If not
8421 		 * don't create new LRO session. Just send this
8422 		 * packet up.
8423 		 */
8424 		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
8425 			return 5;
8426 
8427 		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8428 			struct lro *l_lro = &ring_data->lro0_n[i];
8429 			if (!(l_lro->in_use)) {
8430 				*lro = l_lro;
8431 				ret = 3; /* Begin anew */
8432 				break;
8433 			}
8434 		}
8435 	}
8436 
8437 	if (ret == 0) { /* sessions exceeded */
8438 		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8439 			  __func__);
8440 		*lro = NULL;
8441 		return ret;
8442 	}
8443 
8444 	switch (ret) {
8445 	case 3:
8446 		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8447 				     vlan_tag);
8448 		break;
8449 	case 2:
8450 		update_L3L4_header(sp, *lro);
8451 		break;
8452 	case 1:
8453 		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8454 		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8455 			update_L3L4_header(sp, *lro);
8456 			ret = 4; /* Flush the LRO */
8457 		}
8458 		break;
8459 	default:
8460 		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
8461 		break;
8462 	}
8463 
8464 	return ret;
8465 }
8466 
8467 static void clear_lro_session(struct lro *lro)
8468 {
8469 	static u16 lro_struct_size = sizeof(struct lro);
8470 
8471 	memset(lro, 0, lro_struct_size);
8472 }
8473 
8474 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8475 {
8476 	struct net_device *dev = skb->dev;
8477 	struct s2io_nic *sp = netdev_priv(dev);
8478 
8479 	skb->protocol = eth_type_trans(skb, dev);
8480 	if (vlan_tag && sp->vlan_strip_flag)
8481 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8482 	if (sp->config.napi)
8483 		netif_receive_skb(skb);
8484 	else
8485 		netif_rx(skb);
8486 }
8487 
8488 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8489 			   struct sk_buff *skb, u32 tcp_len)
8490 {
8491 	struct sk_buff *first = lro->parent;
8492 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8493 
8494 	first->len += tcp_len;
8495 	first->data_len = lro->frags_len;
8496 	skb_pull(skb, (skb->len - tcp_len));
8497 	if (skb_shinfo(first)->frag_list)
8498 		lro->last_frag->next = skb;
8499 	else
8500 		skb_shinfo(first)->frag_list = skb;
8501 	first->truesize += skb->truesize;
8502 	lro->last_frag = skb;
8503 	swstats->clubbed_frms_cnt++;
8504 }
8505 
8506 /**
8507  * s2io_io_error_detected - called when PCI error is detected
8508  * @pdev: Pointer to PCI device
8509  * @state: The current pci connection state
8510  *
8511  * This function is called after a PCI bus error affecting
8512  * this device has been detected.
8513  */
8514 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8515 					       pci_channel_state_t state)
8516 {
8517 	struct net_device *netdev = pci_get_drvdata(pdev);
8518 	struct s2io_nic *sp = netdev_priv(netdev);
8519 
8520 	netif_device_detach(netdev);
8521 
8522 	if (state == pci_channel_io_perm_failure)
8523 		return PCI_ERS_RESULT_DISCONNECT;
8524 
8525 	if (netif_running(netdev)) {
8526 		/* Bring down the card, while avoiding PCI I/O */
8527 		do_s2io_card_down(sp, 0);
8528 	}
8529 	pci_disable_device(pdev);
8530 
8531 	return PCI_ERS_RESULT_NEED_RESET;
8532 }
8533 
8534 /**
8535  * s2io_io_slot_reset - called after the pci bus has been reset.
8536  * @pdev: Pointer to PCI device
8537  *
8538  * Restart the card from scratch, as if from a cold-boot.
8539  * At this point, the card has exprienced a hard reset,
8540  * followed by fixups by BIOS, and has its config space
8541  * set up identically to what it was at cold boot.
8542  */
8543 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8544 {
8545 	struct net_device *netdev = pci_get_drvdata(pdev);
8546 	struct s2io_nic *sp = netdev_priv(netdev);
8547 
8548 	if (pci_enable_device(pdev)) {
8549 		pr_err("Cannot re-enable PCI device after reset.\n");
8550 		return PCI_ERS_RESULT_DISCONNECT;
8551 	}
8552 
8553 	pci_set_master(pdev);
8554 	s2io_reset(sp);
8555 
8556 	return PCI_ERS_RESULT_RECOVERED;
8557 }
8558 
8559 /**
8560  * s2io_io_resume - called when traffic can start flowing again.
8561  * @pdev: Pointer to PCI device
8562  *
8563  * This callback is called when the error recovery driver tells
8564  * us that its OK to resume normal operation.
8565  */
8566 static void s2io_io_resume(struct pci_dev *pdev)
8567 {
8568 	struct net_device *netdev = pci_get_drvdata(pdev);
8569 	struct s2io_nic *sp = netdev_priv(netdev);
8570 
8571 	if (netif_running(netdev)) {
8572 		if (s2io_card_up(sp)) {
8573 			pr_err("Can't bring device back up after reset.\n");
8574 			return;
8575 		}
8576 
8577 		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8578 			s2io_card_down(sp);
8579 			pr_err("Can't restore mac addr after reset.\n");
8580 			return;
8581 		}
8582 	}
8583 
8584 	netif_device_attach(netdev);
8585 	netif_tx_wake_all_queues(netdev);
8586 }
8587