1 /************************************************************************ 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC 3 * Copyright(c) 2002-2010 Exar Corp. 4 * 5 * This software may be used and distributed according to the terms of 6 * the GNU General Public License (GPL), incorporated herein by reference. 7 * Drivers based on or derived from this code fall under the GPL and must 8 * retain the authorship, copyright and license notice. This file is not 9 * a complete program and may only be used when the entire operating 10 * system is licensed under the GPL. 11 * See the file COPYING in this distribution for more information. 12 * 13 * Credits: 14 * Jeff Garzik : For pointing out the improper error condition 15 * check in the s2io_xmit routine and also some 16 * issues in the Tx watch dog function. Also for 17 * patiently answering all those innumerable 18 * questions regaring the 2.6 porting issues. 19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some 20 * macros available only in 2.6 Kernel. 21 * Francois Romieu : For pointing out all code part that were 22 * deprecated and also styling related comments. 23 * Grant Grundler : For helping me get rid of some Architecture 24 * dependent code. 25 * Christopher Hellwig : Some more 2.6 specific issues in the driver. 26 * 27 * The module loadable parameters that are supported by the driver and a brief 28 * explanation of all the variables. 29 * 30 * rx_ring_num : This can be used to program the number of receive rings used 31 * in the driver. 32 * rx_ring_sz: This defines the number of receive blocks each ring can have. 33 * This is also an array of size 8. 34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid 35 * values are 1, 2. 36 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver. 37 * tx_fifo_len: This too is an array of 8. Each element defines the number of 38 * Tx descriptors that can be associated with each corresponding FIFO. 39 * intr_type: This defines the type of interrupt. The values can be 0(INTA), 40 * 2(MSI_X). Default value is '2(MSI_X)' 41 * lro_max_pkts: This parameter defines maximum number of packets can be 42 * aggregated as a single large packet 43 * napi: This parameter used to enable/disable NAPI (polling Rx) 44 * Possible values '1' for enable and '0' for disable. Default is '1' 45 * vlan_tag_strip: This can be used to enable or disable vlan stripping. 46 * Possible values '1' for enable , '0' for disable. 47 * Default is '2' - which means disable in promisc mode 48 * and enable in non-promiscuous mode. 49 * multiq: This parameter used to enable/disable MULTIQUEUE support. 50 * Possible values '1' for enable and '0' for disable. Default is '0' 51 ************************************************************************/ 52 53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 54 55 #include <linux/module.h> 56 #include <linux/types.h> 57 #include <linux/errno.h> 58 #include <linux/ioport.h> 59 #include <linux/pci.h> 60 #include <linux/dma-mapping.h> 61 #include <linux/kernel.h> 62 #include <linux/netdevice.h> 63 #include <linux/etherdevice.h> 64 #include <linux/mdio.h> 65 #include <linux/skbuff.h> 66 #include <linux/init.h> 67 #include <linux/delay.h> 68 #include <linux/stddef.h> 69 #include <linux/ioctl.h> 70 #include <linux/timex.h> 71 #include <linux/ethtool.h> 72 #include <linux/workqueue.h> 73 #include <linux/if_vlan.h> 74 #include <linux/ip.h> 75 #include <linux/tcp.h> 76 #include <linux/uaccess.h> 77 #include <linux/io.h> 78 #include <linux/io-64-nonatomic-lo-hi.h> 79 #include <linux/slab.h> 80 #include <linux/prefetch.h> 81 #include <net/tcp.h> 82 #include <net/checksum.h> 83 84 #include <asm/div64.h> 85 #include <asm/irq.h> 86 87 /* local include */ 88 #include "s2io.h" 89 #include "s2io-regs.h" 90 91 #define DRV_VERSION "2.0.26.28" 92 93 /* S2io Driver name & version. */ 94 static const char s2io_driver_name[] = "Neterion"; 95 static const char s2io_driver_version[] = DRV_VERSION; 96 97 static const int rxd_size[2] = {32, 48}; 98 static const int rxd_count[2] = {127, 85}; 99 100 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp) 101 { 102 int ret; 103 104 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) && 105 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK)); 106 107 return ret; 108 } 109 110 /* 111 * Cards with following subsystem_id have a link state indication 112 * problem, 600B, 600C, 600D, 640B, 640C and 640D. 113 * macro below identifies these cards given the subsystem_id. 114 */ 115 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \ 116 (dev_type == XFRAME_I_DEVICE) ? \ 117 ((((subid >= 0x600B) && (subid <= 0x600D)) || \ 118 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0 119 120 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \ 121 ADAPTER_STATUS_RMAC_LOCAL_FAULT))) 122 123 static inline int is_s2io_card_up(const struct s2io_nic *sp) 124 { 125 return test_bit(__S2IO_STATE_CARD_UP, &sp->state); 126 } 127 128 /* Ethtool related variables and Macros. */ 129 static const char s2io_gstrings[][ETH_GSTRING_LEN] = { 130 "Register test\t(offline)", 131 "Eeprom test\t(offline)", 132 "Link test\t(online)", 133 "RLDRAM test\t(offline)", 134 "BIST Test\t(offline)" 135 }; 136 137 static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = { 138 {"tmac_frms"}, 139 {"tmac_data_octets"}, 140 {"tmac_drop_frms"}, 141 {"tmac_mcst_frms"}, 142 {"tmac_bcst_frms"}, 143 {"tmac_pause_ctrl_frms"}, 144 {"tmac_ttl_octets"}, 145 {"tmac_ucst_frms"}, 146 {"tmac_nucst_frms"}, 147 {"tmac_any_err_frms"}, 148 {"tmac_ttl_less_fb_octets"}, 149 {"tmac_vld_ip_octets"}, 150 {"tmac_vld_ip"}, 151 {"tmac_drop_ip"}, 152 {"tmac_icmp"}, 153 {"tmac_rst_tcp"}, 154 {"tmac_tcp"}, 155 {"tmac_udp"}, 156 {"rmac_vld_frms"}, 157 {"rmac_data_octets"}, 158 {"rmac_fcs_err_frms"}, 159 {"rmac_drop_frms"}, 160 {"rmac_vld_mcst_frms"}, 161 {"rmac_vld_bcst_frms"}, 162 {"rmac_in_rng_len_err_frms"}, 163 {"rmac_out_rng_len_err_frms"}, 164 {"rmac_long_frms"}, 165 {"rmac_pause_ctrl_frms"}, 166 {"rmac_unsup_ctrl_frms"}, 167 {"rmac_ttl_octets"}, 168 {"rmac_accepted_ucst_frms"}, 169 {"rmac_accepted_nucst_frms"}, 170 {"rmac_discarded_frms"}, 171 {"rmac_drop_events"}, 172 {"rmac_ttl_less_fb_octets"}, 173 {"rmac_ttl_frms"}, 174 {"rmac_usized_frms"}, 175 {"rmac_osized_frms"}, 176 {"rmac_frag_frms"}, 177 {"rmac_jabber_frms"}, 178 {"rmac_ttl_64_frms"}, 179 {"rmac_ttl_65_127_frms"}, 180 {"rmac_ttl_128_255_frms"}, 181 {"rmac_ttl_256_511_frms"}, 182 {"rmac_ttl_512_1023_frms"}, 183 {"rmac_ttl_1024_1518_frms"}, 184 {"rmac_ip"}, 185 {"rmac_ip_octets"}, 186 {"rmac_hdr_err_ip"}, 187 {"rmac_drop_ip"}, 188 {"rmac_icmp"}, 189 {"rmac_tcp"}, 190 {"rmac_udp"}, 191 {"rmac_err_drp_udp"}, 192 {"rmac_xgmii_err_sym"}, 193 {"rmac_frms_q0"}, 194 {"rmac_frms_q1"}, 195 {"rmac_frms_q2"}, 196 {"rmac_frms_q3"}, 197 {"rmac_frms_q4"}, 198 {"rmac_frms_q5"}, 199 {"rmac_frms_q6"}, 200 {"rmac_frms_q7"}, 201 {"rmac_full_q0"}, 202 {"rmac_full_q1"}, 203 {"rmac_full_q2"}, 204 {"rmac_full_q3"}, 205 {"rmac_full_q4"}, 206 {"rmac_full_q5"}, 207 {"rmac_full_q6"}, 208 {"rmac_full_q7"}, 209 {"rmac_pause_cnt"}, 210 {"rmac_xgmii_data_err_cnt"}, 211 {"rmac_xgmii_ctrl_err_cnt"}, 212 {"rmac_accepted_ip"}, 213 {"rmac_err_tcp"}, 214 {"rd_req_cnt"}, 215 {"new_rd_req_cnt"}, 216 {"new_rd_req_rtry_cnt"}, 217 {"rd_rtry_cnt"}, 218 {"wr_rtry_rd_ack_cnt"}, 219 {"wr_req_cnt"}, 220 {"new_wr_req_cnt"}, 221 {"new_wr_req_rtry_cnt"}, 222 {"wr_rtry_cnt"}, 223 {"wr_disc_cnt"}, 224 {"rd_rtry_wr_ack_cnt"}, 225 {"txp_wr_cnt"}, 226 {"txd_rd_cnt"}, 227 {"txd_wr_cnt"}, 228 {"rxd_rd_cnt"}, 229 {"rxd_wr_cnt"}, 230 {"txf_rd_cnt"}, 231 {"rxf_wr_cnt"} 232 }; 233 234 static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = { 235 {"rmac_ttl_1519_4095_frms"}, 236 {"rmac_ttl_4096_8191_frms"}, 237 {"rmac_ttl_8192_max_frms"}, 238 {"rmac_ttl_gt_max_frms"}, 239 {"rmac_osized_alt_frms"}, 240 {"rmac_jabber_alt_frms"}, 241 {"rmac_gt_max_alt_frms"}, 242 {"rmac_vlan_frms"}, 243 {"rmac_len_discard"}, 244 {"rmac_fcs_discard"}, 245 {"rmac_pf_discard"}, 246 {"rmac_da_discard"}, 247 {"rmac_red_discard"}, 248 {"rmac_rts_discard"}, 249 {"rmac_ingm_full_discard"}, 250 {"link_fault_cnt"} 251 }; 252 253 static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { 254 {"\n DRIVER STATISTICS"}, 255 {"single_bit_ecc_errs"}, 256 {"double_bit_ecc_errs"}, 257 {"parity_err_cnt"}, 258 {"serious_err_cnt"}, 259 {"soft_reset_cnt"}, 260 {"fifo_full_cnt"}, 261 {"ring_0_full_cnt"}, 262 {"ring_1_full_cnt"}, 263 {"ring_2_full_cnt"}, 264 {"ring_3_full_cnt"}, 265 {"ring_4_full_cnt"}, 266 {"ring_5_full_cnt"}, 267 {"ring_6_full_cnt"}, 268 {"ring_7_full_cnt"}, 269 {"alarm_transceiver_temp_high"}, 270 {"alarm_transceiver_temp_low"}, 271 {"alarm_laser_bias_current_high"}, 272 {"alarm_laser_bias_current_low"}, 273 {"alarm_laser_output_power_high"}, 274 {"alarm_laser_output_power_low"}, 275 {"warn_transceiver_temp_high"}, 276 {"warn_transceiver_temp_low"}, 277 {"warn_laser_bias_current_high"}, 278 {"warn_laser_bias_current_low"}, 279 {"warn_laser_output_power_high"}, 280 {"warn_laser_output_power_low"}, 281 {"lro_aggregated_pkts"}, 282 {"lro_flush_both_count"}, 283 {"lro_out_of_sequence_pkts"}, 284 {"lro_flush_due_to_max_pkts"}, 285 {"lro_avg_aggr_pkts"}, 286 {"mem_alloc_fail_cnt"}, 287 {"pci_map_fail_cnt"}, 288 {"watchdog_timer_cnt"}, 289 {"mem_allocated"}, 290 {"mem_freed"}, 291 {"link_up_cnt"}, 292 {"link_down_cnt"}, 293 {"link_up_time"}, 294 {"link_down_time"}, 295 {"tx_tcode_buf_abort_cnt"}, 296 {"tx_tcode_desc_abort_cnt"}, 297 {"tx_tcode_parity_err_cnt"}, 298 {"tx_tcode_link_loss_cnt"}, 299 {"tx_tcode_list_proc_err_cnt"}, 300 {"rx_tcode_parity_err_cnt"}, 301 {"rx_tcode_abort_cnt"}, 302 {"rx_tcode_parity_abort_cnt"}, 303 {"rx_tcode_rda_fail_cnt"}, 304 {"rx_tcode_unkn_prot_cnt"}, 305 {"rx_tcode_fcs_err_cnt"}, 306 {"rx_tcode_buf_size_err_cnt"}, 307 {"rx_tcode_rxd_corrupt_cnt"}, 308 {"rx_tcode_unkn_err_cnt"}, 309 {"tda_err_cnt"}, 310 {"pfc_err_cnt"}, 311 {"pcc_err_cnt"}, 312 {"tti_err_cnt"}, 313 {"tpa_err_cnt"}, 314 {"sm_err_cnt"}, 315 {"lso_err_cnt"}, 316 {"mac_tmac_err_cnt"}, 317 {"mac_rmac_err_cnt"}, 318 {"xgxs_txgxs_err_cnt"}, 319 {"xgxs_rxgxs_err_cnt"}, 320 {"rc_err_cnt"}, 321 {"prc_pcix_err_cnt"}, 322 {"rpa_err_cnt"}, 323 {"rda_err_cnt"}, 324 {"rti_err_cnt"}, 325 {"mc_err_cnt"} 326 }; 327 328 #define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys) 329 #define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys) 330 #define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys) 331 332 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN) 333 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN) 334 335 #define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN) 336 #define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN) 337 338 #define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings) 339 #define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN) 340 341 /* copy mac addr to def_mac_addr array */ 342 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr) 343 { 344 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr); 345 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8); 346 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16); 347 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24); 348 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32); 349 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40); 350 } 351 352 /* 353 * Constants to be programmed into the Xena's registers, to configure 354 * the XAUI. 355 */ 356 357 #define END_SIGN 0x0 358 static const u64 herc_act_dtx_cfg[] = { 359 /* Set address */ 360 0x8000051536750000ULL, 0x80000515367500E0ULL, 361 /* Write data */ 362 0x8000051536750004ULL, 0x80000515367500E4ULL, 363 /* Set address */ 364 0x80010515003F0000ULL, 0x80010515003F00E0ULL, 365 /* Write data */ 366 0x80010515003F0004ULL, 0x80010515003F00E4ULL, 367 /* Set address */ 368 0x801205150D440000ULL, 0x801205150D4400E0ULL, 369 /* Write data */ 370 0x801205150D440004ULL, 0x801205150D4400E4ULL, 371 /* Set address */ 372 0x80020515F2100000ULL, 0x80020515F21000E0ULL, 373 /* Write data */ 374 0x80020515F2100004ULL, 0x80020515F21000E4ULL, 375 /* Done */ 376 END_SIGN 377 }; 378 379 static const u64 xena_dtx_cfg[] = { 380 /* Set address */ 381 0x8000051500000000ULL, 0x80000515000000E0ULL, 382 /* Write data */ 383 0x80000515D9350004ULL, 0x80000515D93500E4ULL, 384 /* Set address */ 385 0x8001051500000000ULL, 0x80010515000000E0ULL, 386 /* Write data */ 387 0x80010515001E0004ULL, 0x80010515001E00E4ULL, 388 /* Set address */ 389 0x8002051500000000ULL, 0x80020515000000E0ULL, 390 /* Write data */ 391 0x80020515F2100004ULL, 0x80020515F21000E4ULL, 392 END_SIGN 393 }; 394 395 /* 396 * Constants for Fixing the MacAddress problem seen mostly on 397 * Alpha machines. 398 */ 399 static const u64 fix_mac[] = { 400 0x0060000000000000ULL, 0x0060600000000000ULL, 401 0x0040600000000000ULL, 0x0000600000000000ULL, 402 0x0020600000000000ULL, 0x0060600000000000ULL, 403 0x0020600000000000ULL, 0x0060600000000000ULL, 404 0x0020600000000000ULL, 0x0060600000000000ULL, 405 0x0020600000000000ULL, 0x0060600000000000ULL, 406 0x0020600000000000ULL, 0x0060600000000000ULL, 407 0x0020600000000000ULL, 0x0060600000000000ULL, 408 0x0020600000000000ULL, 0x0060600000000000ULL, 409 0x0020600000000000ULL, 0x0060600000000000ULL, 410 0x0020600000000000ULL, 0x0060600000000000ULL, 411 0x0020600000000000ULL, 0x0060600000000000ULL, 412 0x0020600000000000ULL, 0x0000600000000000ULL, 413 0x0040600000000000ULL, 0x0060600000000000ULL, 414 END_SIGN 415 }; 416 417 MODULE_LICENSE("GPL"); 418 MODULE_VERSION(DRV_VERSION); 419 420 421 /* Module Loadable parameters. */ 422 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM); 423 S2IO_PARM_INT(rx_ring_num, 1); 424 S2IO_PARM_INT(multiq, 0); 425 S2IO_PARM_INT(rx_ring_mode, 1); 426 S2IO_PARM_INT(use_continuous_tx_intrs, 1); 427 S2IO_PARM_INT(rmac_pause_time, 0x100); 428 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187); 429 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187); 430 S2IO_PARM_INT(shared_splits, 0); 431 S2IO_PARM_INT(tmac_util_period, 5); 432 S2IO_PARM_INT(rmac_util_period, 5); 433 S2IO_PARM_INT(l3l4hdr_size, 128); 434 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */ 435 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING); 436 /* Frequency of Rx desc syncs expressed as power of 2 */ 437 S2IO_PARM_INT(rxsync_frequency, 3); 438 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */ 439 S2IO_PARM_INT(intr_type, 2); 440 /* Large receive offload feature */ 441 442 /* Max pkts to be aggregated by LRO at one time. If not specified, 443 * aggregation happens until we hit max IP pkt size(64K) 444 */ 445 S2IO_PARM_INT(lro_max_pkts, 0xFFFF); 446 S2IO_PARM_INT(indicate_max_pkts, 0); 447 448 S2IO_PARM_INT(napi, 1); 449 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC); 450 451 static unsigned int tx_fifo_len[MAX_TX_FIFOS] = 452 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; 453 static unsigned int rx_ring_sz[MAX_RX_RINGS] = 454 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; 455 static unsigned int rts_frm_len[MAX_RX_RINGS] = 456 {[0 ...(MAX_RX_RINGS - 1)] = 0 }; 457 458 module_param_array(tx_fifo_len, uint, NULL, 0); 459 module_param_array(rx_ring_sz, uint, NULL, 0); 460 module_param_array(rts_frm_len, uint, NULL, 0); 461 462 /* 463 * S2IO device table. 464 * This table lists all the devices that this driver supports. 465 */ 466 static const struct pci_device_id s2io_tbl[] = { 467 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN, 468 PCI_ANY_ID, PCI_ANY_ID}, 469 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI, 470 PCI_ANY_ID, PCI_ANY_ID}, 471 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN, 472 PCI_ANY_ID, PCI_ANY_ID}, 473 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI, 474 PCI_ANY_ID, PCI_ANY_ID}, 475 {0,} 476 }; 477 478 MODULE_DEVICE_TABLE(pci, s2io_tbl); 479 480 static const struct pci_error_handlers s2io_err_handler = { 481 .error_detected = s2io_io_error_detected, 482 .slot_reset = s2io_io_slot_reset, 483 .resume = s2io_io_resume, 484 }; 485 486 static struct pci_driver s2io_driver = { 487 .name = "S2IO", 488 .id_table = s2io_tbl, 489 .probe = s2io_init_nic, 490 .remove = s2io_rem_nic, 491 .err_handler = &s2io_err_handler, 492 }; 493 494 /* A simplifier macro used both by init and free shared_mem Fns(). */ 495 #define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each) 496 497 /* netqueue manipulation helper functions */ 498 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp) 499 { 500 if (!sp->config.multiq) { 501 int i; 502 503 for (i = 0; i < sp->config.tx_fifo_num; i++) 504 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP; 505 } 506 netif_tx_stop_all_queues(sp->dev); 507 } 508 509 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no) 510 { 511 if (!sp->config.multiq) 512 sp->mac_control.fifos[fifo_no].queue_state = 513 FIFO_QUEUE_STOP; 514 515 netif_tx_stop_all_queues(sp->dev); 516 } 517 518 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp) 519 { 520 if (!sp->config.multiq) { 521 int i; 522 523 for (i = 0; i < sp->config.tx_fifo_num; i++) 524 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START; 525 } 526 netif_tx_start_all_queues(sp->dev); 527 } 528 529 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp) 530 { 531 if (!sp->config.multiq) { 532 int i; 533 534 for (i = 0; i < sp->config.tx_fifo_num; i++) 535 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START; 536 } 537 netif_tx_wake_all_queues(sp->dev); 538 } 539 540 static inline void s2io_wake_tx_queue( 541 struct fifo_info *fifo, int cnt, u8 multiq) 542 { 543 544 if (multiq) { 545 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no)) 546 netif_wake_subqueue(fifo->dev, fifo->fifo_no); 547 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) { 548 if (netif_queue_stopped(fifo->dev)) { 549 fifo->queue_state = FIFO_QUEUE_START; 550 netif_wake_queue(fifo->dev); 551 } 552 } 553 } 554 555 /** 556 * init_shared_mem - Allocation and Initialization of Memory 557 * @nic: Device private variable. 558 * Description: The function allocates all the memory areas shared 559 * between the NIC and the driver. This includes Tx descriptors, 560 * Rx descriptors and the statistics block. 561 */ 562 563 static int init_shared_mem(struct s2io_nic *nic) 564 { 565 u32 size; 566 void *tmp_v_addr, *tmp_v_addr_next; 567 dma_addr_t tmp_p_addr, tmp_p_addr_next; 568 struct RxD_block *pre_rxd_blk = NULL; 569 int i, j, blk_cnt; 570 int lst_size, lst_per_page; 571 struct net_device *dev = nic->dev; 572 unsigned long tmp; 573 struct buffAdd *ba; 574 struct config_param *config = &nic->config; 575 struct mac_info *mac_control = &nic->mac_control; 576 unsigned long long mem_allocated = 0; 577 578 /* Allocation and initialization of TXDLs in FIFOs */ 579 size = 0; 580 for (i = 0; i < config->tx_fifo_num; i++) { 581 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; 582 583 size += tx_cfg->fifo_len; 584 } 585 if (size > MAX_AVAILABLE_TXDS) { 586 DBG_PRINT(ERR_DBG, 587 "Too many TxDs requested: %d, max supported: %d\n", 588 size, MAX_AVAILABLE_TXDS); 589 return -EINVAL; 590 } 591 592 size = 0; 593 for (i = 0; i < config->tx_fifo_num; i++) { 594 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; 595 596 size = tx_cfg->fifo_len; 597 /* 598 * Legal values are from 2 to 8192 599 */ 600 if (size < 2) { 601 DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - " 602 "Valid lengths are 2 through 8192\n", 603 i, size); 604 return -EINVAL; 605 } 606 } 607 608 lst_size = (sizeof(struct TxD) * config->max_txds); 609 lst_per_page = PAGE_SIZE / lst_size; 610 611 for (i = 0; i < config->tx_fifo_num; i++) { 612 struct fifo_info *fifo = &mac_control->fifos[i]; 613 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; 614 int fifo_len = tx_cfg->fifo_len; 615 int list_holder_size = fifo_len * sizeof(struct list_info_hold); 616 617 fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL); 618 if (!fifo->list_info) { 619 DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n"); 620 return -ENOMEM; 621 } 622 mem_allocated += list_holder_size; 623 } 624 for (i = 0; i < config->tx_fifo_num; i++) { 625 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len, 626 lst_per_page); 627 struct fifo_info *fifo = &mac_control->fifos[i]; 628 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; 629 630 fifo->tx_curr_put_info.offset = 0; 631 fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1; 632 fifo->tx_curr_get_info.offset = 0; 633 fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1; 634 fifo->fifo_no = i; 635 fifo->nic = nic; 636 fifo->max_txds = MAX_SKB_FRAGS + 2; 637 fifo->dev = dev; 638 639 for (j = 0; j < page_num; j++) { 640 int k = 0; 641 dma_addr_t tmp_p; 642 void *tmp_v; 643 tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE, 644 &tmp_p, GFP_KERNEL); 645 if (!tmp_v) { 646 DBG_PRINT(INFO_DBG, 647 "dma_alloc_coherent failed for TxDL\n"); 648 return -ENOMEM; 649 } 650 /* If we got a zero DMA address(can happen on 651 * certain platforms like PPC), reallocate. 652 * Store virtual address of page we don't want, 653 * to be freed later. 654 */ 655 if (!tmp_p) { 656 mac_control->zerodma_virt_addr = tmp_v; 657 DBG_PRINT(INIT_DBG, 658 "%s: Zero DMA address for TxDL. " 659 "Virtual address %p\n", 660 dev->name, tmp_v); 661 tmp_v = dma_alloc_coherent(&nic->pdev->dev, 662 PAGE_SIZE, &tmp_p, 663 GFP_KERNEL); 664 if (!tmp_v) { 665 DBG_PRINT(INFO_DBG, 666 "dma_alloc_coherent failed for TxDL\n"); 667 return -ENOMEM; 668 } 669 mem_allocated += PAGE_SIZE; 670 } 671 while (k < lst_per_page) { 672 int l = (j * lst_per_page) + k; 673 if (l == tx_cfg->fifo_len) 674 break; 675 fifo->list_info[l].list_virt_addr = 676 tmp_v + (k * lst_size); 677 fifo->list_info[l].list_phy_addr = 678 tmp_p + (k * lst_size); 679 k++; 680 } 681 } 682 } 683 684 for (i = 0; i < config->tx_fifo_num; i++) { 685 struct fifo_info *fifo = &mac_control->fifos[i]; 686 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; 687 688 size = tx_cfg->fifo_len; 689 fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL); 690 if (!fifo->ufo_in_band_v) 691 return -ENOMEM; 692 mem_allocated += (size * sizeof(u64)); 693 } 694 695 /* Allocation and initialization of RXDs in Rings */ 696 size = 0; 697 for (i = 0; i < config->rx_ring_num; i++) { 698 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; 699 struct ring_info *ring = &mac_control->rings[i]; 700 701 if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) { 702 DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a " 703 "multiple of RxDs per Block\n", 704 dev->name, i); 705 return FAILURE; 706 } 707 size += rx_cfg->num_rxd; 708 ring->block_count = rx_cfg->num_rxd / 709 (rxd_count[nic->rxd_mode] + 1); 710 ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count; 711 } 712 if (nic->rxd_mode == RXD_MODE_1) 713 size = (size * (sizeof(struct RxD1))); 714 else 715 size = (size * (sizeof(struct RxD3))); 716 717 for (i = 0; i < config->rx_ring_num; i++) { 718 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; 719 struct ring_info *ring = &mac_control->rings[i]; 720 721 ring->rx_curr_get_info.block_index = 0; 722 ring->rx_curr_get_info.offset = 0; 723 ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1; 724 ring->rx_curr_put_info.block_index = 0; 725 ring->rx_curr_put_info.offset = 0; 726 ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1; 727 ring->nic = nic; 728 ring->ring_no = i; 729 730 blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1); 731 /* Allocating all the Rx blocks */ 732 for (j = 0; j < blk_cnt; j++) { 733 struct rx_block_info *rx_blocks; 734 int l; 735 736 rx_blocks = &ring->rx_blocks[j]; 737 size = SIZE_OF_BLOCK; /* size is always page size */ 738 tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size, 739 &tmp_p_addr, GFP_KERNEL); 740 if (tmp_v_addr == NULL) { 741 /* 742 * In case of failure, free_shared_mem() 743 * is called, which should free any 744 * memory that was alloced till the 745 * failure happened. 746 */ 747 rx_blocks->block_virt_addr = tmp_v_addr; 748 return -ENOMEM; 749 } 750 mem_allocated += size; 751 752 size = sizeof(struct rxd_info) * 753 rxd_count[nic->rxd_mode]; 754 rx_blocks->block_virt_addr = tmp_v_addr; 755 rx_blocks->block_dma_addr = tmp_p_addr; 756 rx_blocks->rxds = kmalloc(size, GFP_KERNEL); 757 if (!rx_blocks->rxds) 758 return -ENOMEM; 759 mem_allocated += size; 760 for (l = 0; l < rxd_count[nic->rxd_mode]; l++) { 761 rx_blocks->rxds[l].virt_addr = 762 rx_blocks->block_virt_addr + 763 (rxd_size[nic->rxd_mode] * l); 764 rx_blocks->rxds[l].dma_addr = 765 rx_blocks->block_dma_addr + 766 (rxd_size[nic->rxd_mode] * l); 767 } 768 } 769 /* Interlinking all Rx Blocks */ 770 for (j = 0; j < blk_cnt; j++) { 771 int next = (j + 1) % blk_cnt; 772 tmp_v_addr = ring->rx_blocks[j].block_virt_addr; 773 tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr; 774 tmp_p_addr = ring->rx_blocks[j].block_dma_addr; 775 tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr; 776 777 pre_rxd_blk = tmp_v_addr; 778 pre_rxd_blk->reserved_2_pNext_RxD_block = 779 (unsigned long)tmp_v_addr_next; 780 pre_rxd_blk->pNext_RxD_Blk_physical = 781 (u64)tmp_p_addr_next; 782 } 783 } 784 if (nic->rxd_mode == RXD_MODE_3B) { 785 /* 786 * Allocation of Storages for buffer addresses in 2BUFF mode 787 * and the buffers as well. 788 */ 789 for (i = 0; i < config->rx_ring_num; i++) { 790 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; 791 struct ring_info *ring = &mac_control->rings[i]; 792 793 blk_cnt = rx_cfg->num_rxd / 794 (rxd_count[nic->rxd_mode] + 1); 795 size = sizeof(struct buffAdd *) * blk_cnt; 796 ring->ba = kmalloc(size, GFP_KERNEL); 797 if (!ring->ba) 798 return -ENOMEM; 799 mem_allocated += size; 800 for (j = 0; j < blk_cnt; j++) { 801 int k = 0; 802 803 size = sizeof(struct buffAdd) * 804 (rxd_count[nic->rxd_mode] + 1); 805 ring->ba[j] = kmalloc(size, GFP_KERNEL); 806 if (!ring->ba[j]) 807 return -ENOMEM; 808 mem_allocated += size; 809 while (k != rxd_count[nic->rxd_mode]) { 810 ba = &ring->ba[j][k]; 811 size = BUF0_LEN + ALIGN_SIZE; 812 ba->ba_0_org = kmalloc(size, GFP_KERNEL); 813 if (!ba->ba_0_org) 814 return -ENOMEM; 815 mem_allocated += size; 816 tmp = (unsigned long)ba->ba_0_org; 817 tmp += ALIGN_SIZE; 818 tmp &= ~((unsigned long)ALIGN_SIZE); 819 ba->ba_0 = (void *)tmp; 820 821 size = BUF1_LEN + ALIGN_SIZE; 822 ba->ba_1_org = kmalloc(size, GFP_KERNEL); 823 if (!ba->ba_1_org) 824 return -ENOMEM; 825 mem_allocated += size; 826 tmp = (unsigned long)ba->ba_1_org; 827 tmp += ALIGN_SIZE; 828 tmp &= ~((unsigned long)ALIGN_SIZE); 829 ba->ba_1 = (void *)tmp; 830 k++; 831 } 832 } 833 } 834 } 835 836 /* Allocation and initialization of Statistics block */ 837 size = sizeof(struct stat_block); 838 mac_control->stats_mem = 839 dma_alloc_coherent(&nic->pdev->dev, size, 840 &mac_control->stats_mem_phy, GFP_KERNEL); 841 842 if (!mac_control->stats_mem) { 843 /* 844 * In case of failure, free_shared_mem() is called, which 845 * should free any memory that was alloced till the 846 * failure happened. 847 */ 848 return -ENOMEM; 849 } 850 mem_allocated += size; 851 mac_control->stats_mem_sz = size; 852 853 tmp_v_addr = mac_control->stats_mem; 854 mac_control->stats_info = tmp_v_addr; 855 memset(tmp_v_addr, 0, size); 856 DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n", 857 dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr); 858 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated; 859 return SUCCESS; 860 } 861 862 /** 863 * free_shared_mem - Free the allocated Memory 864 * @nic: Device private variable. 865 * Description: This function is to free all memory locations allocated by 866 * the init_shared_mem() function and return it to the kernel. 867 */ 868 869 static void free_shared_mem(struct s2io_nic *nic) 870 { 871 int i, j, blk_cnt, size; 872 void *tmp_v_addr; 873 dma_addr_t tmp_p_addr; 874 int lst_size, lst_per_page; 875 struct net_device *dev; 876 int page_num = 0; 877 struct config_param *config; 878 struct mac_info *mac_control; 879 struct stat_block *stats; 880 struct swStat *swstats; 881 882 if (!nic) 883 return; 884 885 dev = nic->dev; 886 887 config = &nic->config; 888 mac_control = &nic->mac_control; 889 stats = mac_control->stats_info; 890 swstats = &stats->sw_stat; 891 892 lst_size = sizeof(struct TxD) * config->max_txds; 893 lst_per_page = PAGE_SIZE / lst_size; 894 895 for (i = 0; i < config->tx_fifo_num; i++) { 896 struct fifo_info *fifo = &mac_control->fifos[i]; 897 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; 898 899 page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page); 900 for (j = 0; j < page_num; j++) { 901 int mem_blks = (j * lst_per_page); 902 struct list_info_hold *fli; 903 904 if (!fifo->list_info) 905 return; 906 907 fli = &fifo->list_info[mem_blks]; 908 if (!fli->list_virt_addr) 909 break; 910 dma_free_coherent(&nic->pdev->dev, PAGE_SIZE, 911 fli->list_virt_addr, 912 fli->list_phy_addr); 913 swstats->mem_freed += PAGE_SIZE; 914 } 915 /* If we got a zero DMA address during allocation, 916 * free the page now 917 */ 918 if (mac_control->zerodma_virt_addr) { 919 dma_free_coherent(&nic->pdev->dev, PAGE_SIZE, 920 mac_control->zerodma_virt_addr, 921 (dma_addr_t)0); 922 DBG_PRINT(INIT_DBG, 923 "%s: Freeing TxDL with zero DMA address. " 924 "Virtual address %p\n", 925 dev->name, mac_control->zerodma_virt_addr); 926 swstats->mem_freed += PAGE_SIZE; 927 } 928 kfree(fifo->list_info); 929 swstats->mem_freed += tx_cfg->fifo_len * 930 sizeof(struct list_info_hold); 931 } 932 933 size = SIZE_OF_BLOCK; 934 for (i = 0; i < config->rx_ring_num; i++) { 935 struct ring_info *ring = &mac_control->rings[i]; 936 937 blk_cnt = ring->block_count; 938 for (j = 0; j < blk_cnt; j++) { 939 tmp_v_addr = ring->rx_blocks[j].block_virt_addr; 940 tmp_p_addr = ring->rx_blocks[j].block_dma_addr; 941 if (tmp_v_addr == NULL) 942 break; 943 dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr, 944 tmp_p_addr); 945 swstats->mem_freed += size; 946 kfree(ring->rx_blocks[j].rxds); 947 swstats->mem_freed += sizeof(struct rxd_info) * 948 rxd_count[nic->rxd_mode]; 949 } 950 } 951 952 if (nic->rxd_mode == RXD_MODE_3B) { 953 /* Freeing buffer storage addresses in 2BUFF mode. */ 954 for (i = 0; i < config->rx_ring_num; i++) { 955 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; 956 struct ring_info *ring = &mac_control->rings[i]; 957 958 blk_cnt = rx_cfg->num_rxd / 959 (rxd_count[nic->rxd_mode] + 1); 960 for (j = 0; j < blk_cnt; j++) { 961 int k = 0; 962 if (!ring->ba[j]) 963 continue; 964 while (k != rxd_count[nic->rxd_mode]) { 965 struct buffAdd *ba = &ring->ba[j][k]; 966 kfree(ba->ba_0_org); 967 swstats->mem_freed += 968 BUF0_LEN + ALIGN_SIZE; 969 kfree(ba->ba_1_org); 970 swstats->mem_freed += 971 BUF1_LEN + ALIGN_SIZE; 972 k++; 973 } 974 kfree(ring->ba[j]); 975 swstats->mem_freed += sizeof(struct buffAdd) * 976 (rxd_count[nic->rxd_mode] + 1); 977 } 978 kfree(ring->ba); 979 swstats->mem_freed += sizeof(struct buffAdd *) * 980 blk_cnt; 981 } 982 } 983 984 for (i = 0; i < nic->config.tx_fifo_num; i++) { 985 struct fifo_info *fifo = &mac_control->fifos[i]; 986 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; 987 988 if (fifo->ufo_in_band_v) { 989 swstats->mem_freed += tx_cfg->fifo_len * 990 sizeof(u64); 991 kfree(fifo->ufo_in_band_v); 992 } 993 } 994 995 if (mac_control->stats_mem) { 996 swstats->mem_freed += mac_control->stats_mem_sz; 997 dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz, 998 mac_control->stats_mem, 999 mac_control->stats_mem_phy); 1000 } 1001 } 1002 1003 /* 1004 * s2io_verify_pci_mode - 1005 */ 1006 1007 static int s2io_verify_pci_mode(struct s2io_nic *nic) 1008 { 1009 struct XENA_dev_config __iomem *bar0 = nic->bar0; 1010 register u64 val64 = 0; 1011 int mode; 1012 1013 val64 = readq(&bar0->pci_mode); 1014 mode = (u8)GET_PCI_MODE(val64); 1015 1016 if (val64 & PCI_MODE_UNKNOWN_MODE) 1017 return -1; /* Unknown PCI mode */ 1018 return mode; 1019 } 1020 1021 #define NEC_VENID 0x1033 1022 #define NEC_DEVID 0x0125 1023 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev) 1024 { 1025 struct pci_dev *tdev = NULL; 1026 for_each_pci_dev(tdev) { 1027 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) { 1028 if (tdev->bus == s2io_pdev->bus->parent) { 1029 pci_dev_put(tdev); 1030 return 1; 1031 } 1032 } 1033 } 1034 return 0; 1035 } 1036 1037 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266}; 1038 /* 1039 * s2io_print_pci_mode - 1040 */ 1041 static int s2io_print_pci_mode(struct s2io_nic *nic) 1042 { 1043 struct XENA_dev_config __iomem *bar0 = nic->bar0; 1044 register u64 val64 = 0; 1045 int mode; 1046 struct config_param *config = &nic->config; 1047 const char *pcimode; 1048 1049 val64 = readq(&bar0->pci_mode); 1050 mode = (u8)GET_PCI_MODE(val64); 1051 1052 if (val64 & PCI_MODE_UNKNOWN_MODE) 1053 return -1; /* Unknown PCI mode */ 1054 1055 config->bus_speed = bus_speed[mode]; 1056 1057 if (s2io_on_nec_bridge(nic->pdev)) { 1058 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n", 1059 nic->dev->name); 1060 return mode; 1061 } 1062 1063 switch (mode) { 1064 case PCI_MODE_PCI_33: 1065 pcimode = "33MHz PCI bus"; 1066 break; 1067 case PCI_MODE_PCI_66: 1068 pcimode = "66MHz PCI bus"; 1069 break; 1070 case PCI_MODE_PCIX_M1_66: 1071 pcimode = "66MHz PCIX(M1) bus"; 1072 break; 1073 case PCI_MODE_PCIX_M1_100: 1074 pcimode = "100MHz PCIX(M1) bus"; 1075 break; 1076 case PCI_MODE_PCIX_M1_133: 1077 pcimode = "133MHz PCIX(M1) bus"; 1078 break; 1079 case PCI_MODE_PCIX_M2_66: 1080 pcimode = "133MHz PCIX(M2) bus"; 1081 break; 1082 case PCI_MODE_PCIX_M2_100: 1083 pcimode = "200MHz PCIX(M2) bus"; 1084 break; 1085 case PCI_MODE_PCIX_M2_133: 1086 pcimode = "266MHz PCIX(M2) bus"; 1087 break; 1088 default: 1089 pcimode = "unsupported bus!"; 1090 mode = -1; 1091 } 1092 1093 DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n", 1094 nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode); 1095 1096 return mode; 1097 } 1098 1099 /** 1100 * init_tti - Initialization transmit traffic interrupt scheme 1101 * @nic: device private variable 1102 * @link: link status (UP/DOWN) used to enable/disable continuous 1103 * transmit interrupts 1104 * Description: The function configures transmit traffic interrupts 1105 * Return Value: SUCCESS on success and 1106 * '-1' on failure 1107 */ 1108 1109 static int init_tti(struct s2io_nic *nic, int link, bool may_sleep) 1110 { 1111 struct XENA_dev_config __iomem *bar0 = nic->bar0; 1112 register u64 val64 = 0; 1113 int i; 1114 struct config_param *config = &nic->config; 1115 1116 for (i = 0; i < config->tx_fifo_num; i++) { 1117 /* 1118 * TTI Initialization. Default Tx timer gets us about 1119 * 250 interrupts per sec. Continuous interrupts are enabled 1120 * by default. 1121 */ 1122 if (nic->device_type == XFRAME_II_DEVICE) { 1123 int count = (nic->config.bus_speed * 125)/2; 1124 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count); 1125 } else 1126 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078); 1127 1128 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) | 1129 TTI_DATA1_MEM_TX_URNG_B(0x10) | 1130 TTI_DATA1_MEM_TX_URNG_C(0x30) | 1131 TTI_DATA1_MEM_TX_TIMER_AC_EN; 1132 if (i == 0) 1133 if (use_continuous_tx_intrs && (link == LINK_UP)) 1134 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; 1135 writeq(val64, &bar0->tti_data1_mem); 1136 1137 if (nic->config.intr_type == MSI_X) { 1138 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1139 TTI_DATA2_MEM_TX_UFC_B(0x100) | 1140 TTI_DATA2_MEM_TX_UFC_C(0x200) | 1141 TTI_DATA2_MEM_TX_UFC_D(0x300); 1142 } else { 1143 if ((nic->config.tx_steering_type == 1144 TX_DEFAULT_STEERING) && 1145 (config->tx_fifo_num > 1) && 1146 (i >= nic->udp_fifo_idx) && 1147 (i < (nic->udp_fifo_idx + 1148 nic->total_udp_fifos))) 1149 val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) | 1150 TTI_DATA2_MEM_TX_UFC_B(0x80) | 1151 TTI_DATA2_MEM_TX_UFC_C(0x100) | 1152 TTI_DATA2_MEM_TX_UFC_D(0x120); 1153 else 1154 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1155 TTI_DATA2_MEM_TX_UFC_B(0x20) | 1156 TTI_DATA2_MEM_TX_UFC_C(0x40) | 1157 TTI_DATA2_MEM_TX_UFC_D(0x80); 1158 } 1159 1160 writeq(val64, &bar0->tti_data2_mem); 1161 1162 val64 = TTI_CMD_MEM_WE | 1163 TTI_CMD_MEM_STROBE_NEW_CMD | 1164 TTI_CMD_MEM_OFFSET(i); 1165 writeq(val64, &bar0->tti_command_mem); 1166 1167 if (wait_for_cmd_complete(&bar0->tti_command_mem, 1168 TTI_CMD_MEM_STROBE_NEW_CMD, 1169 S2IO_BIT_RESET, may_sleep) != SUCCESS) 1170 return FAILURE; 1171 } 1172 1173 return SUCCESS; 1174 } 1175 1176 /** 1177 * init_nic - Initialization of hardware 1178 * @nic: device private variable 1179 * Description: The function sequentially configures every block 1180 * of the H/W from their reset values. 1181 * Return Value: SUCCESS on success and 1182 * '-1' on failure (endian settings incorrect). 1183 */ 1184 1185 static int init_nic(struct s2io_nic *nic) 1186 { 1187 struct XENA_dev_config __iomem *bar0 = nic->bar0; 1188 struct net_device *dev = nic->dev; 1189 register u64 val64 = 0; 1190 void __iomem *add; 1191 u32 time; 1192 int i, j; 1193 int dtx_cnt = 0; 1194 unsigned long long mem_share; 1195 int mem_size; 1196 struct config_param *config = &nic->config; 1197 struct mac_info *mac_control = &nic->mac_control; 1198 1199 /* to set the swapper controle on the card */ 1200 if (s2io_set_swapper(nic)) { 1201 DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n"); 1202 return -EIO; 1203 } 1204 1205 /* 1206 * Herc requires EOI to be removed from reset before XGXS, so.. 1207 */ 1208 if (nic->device_type & XFRAME_II_DEVICE) { 1209 val64 = 0xA500000000ULL; 1210 writeq(val64, &bar0->sw_reset); 1211 msleep(500); 1212 val64 = readq(&bar0->sw_reset); 1213 } 1214 1215 /* Remove XGXS from reset state */ 1216 val64 = 0; 1217 writeq(val64, &bar0->sw_reset); 1218 msleep(500); 1219 val64 = readq(&bar0->sw_reset); 1220 1221 /* Ensure that it's safe to access registers by checking 1222 * RIC_RUNNING bit is reset. Check is valid only for XframeII. 1223 */ 1224 if (nic->device_type == XFRAME_II_DEVICE) { 1225 for (i = 0; i < 50; i++) { 1226 val64 = readq(&bar0->adapter_status); 1227 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING)) 1228 break; 1229 msleep(10); 1230 } 1231 if (i == 50) 1232 return -ENODEV; 1233 } 1234 1235 /* Enable Receiving broadcasts */ 1236 add = &bar0->mac_cfg; 1237 val64 = readq(&bar0->mac_cfg); 1238 val64 |= MAC_RMAC_BCAST_ENABLE; 1239 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 1240 writel((u32)val64, add); 1241 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 1242 writel((u32) (val64 >> 32), (add + 4)); 1243 1244 /* Read registers in all blocks */ 1245 val64 = readq(&bar0->mac_int_mask); 1246 val64 = readq(&bar0->mc_int_mask); 1247 val64 = readq(&bar0->xgxs_int_mask); 1248 1249 /* Set MTU */ 1250 val64 = dev->mtu; 1251 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); 1252 1253 if (nic->device_type & XFRAME_II_DEVICE) { 1254 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) { 1255 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt], 1256 &bar0->dtx_control, UF); 1257 if (dtx_cnt & 0x1) 1258 msleep(1); /* Necessary!! */ 1259 dtx_cnt++; 1260 } 1261 } else { 1262 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) { 1263 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt], 1264 &bar0->dtx_control, UF); 1265 val64 = readq(&bar0->dtx_control); 1266 dtx_cnt++; 1267 } 1268 } 1269 1270 /* Tx DMA Initialization */ 1271 val64 = 0; 1272 writeq(val64, &bar0->tx_fifo_partition_0); 1273 writeq(val64, &bar0->tx_fifo_partition_1); 1274 writeq(val64, &bar0->tx_fifo_partition_2); 1275 writeq(val64, &bar0->tx_fifo_partition_3); 1276 1277 for (i = 0, j = 0; i < config->tx_fifo_num; i++) { 1278 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; 1279 1280 val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) | 1281 vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3); 1282 1283 if (i == (config->tx_fifo_num - 1)) { 1284 if (i % 2 == 0) 1285 i++; 1286 } 1287 1288 switch (i) { 1289 case 1: 1290 writeq(val64, &bar0->tx_fifo_partition_0); 1291 val64 = 0; 1292 j = 0; 1293 break; 1294 case 3: 1295 writeq(val64, &bar0->tx_fifo_partition_1); 1296 val64 = 0; 1297 j = 0; 1298 break; 1299 case 5: 1300 writeq(val64, &bar0->tx_fifo_partition_2); 1301 val64 = 0; 1302 j = 0; 1303 break; 1304 case 7: 1305 writeq(val64, &bar0->tx_fifo_partition_3); 1306 val64 = 0; 1307 j = 0; 1308 break; 1309 default: 1310 j++; 1311 break; 1312 } 1313 } 1314 1315 /* 1316 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug 1317 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE. 1318 */ 1319 if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4)) 1320 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable); 1321 1322 val64 = readq(&bar0->tx_fifo_partition_0); 1323 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n", 1324 &bar0->tx_fifo_partition_0, (unsigned long long)val64); 1325 1326 /* 1327 * Initialization of Tx_PA_CONFIG register to ignore packet 1328 * integrity checking. 1329 */ 1330 val64 = readq(&bar0->tx_pa_cfg); 1331 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | 1332 TX_PA_CFG_IGNORE_SNAP_OUI | 1333 TX_PA_CFG_IGNORE_LLC_CTRL | 1334 TX_PA_CFG_IGNORE_L2_ERR; 1335 writeq(val64, &bar0->tx_pa_cfg); 1336 1337 /* Rx DMA initialization. */ 1338 val64 = 0; 1339 for (i = 0; i < config->rx_ring_num; i++) { 1340 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; 1341 1342 val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3); 1343 } 1344 writeq(val64, &bar0->rx_queue_priority); 1345 1346 /* 1347 * Allocating equal share of memory to all the 1348 * configured Rings. 1349 */ 1350 val64 = 0; 1351 if (nic->device_type & XFRAME_II_DEVICE) 1352 mem_size = 32; 1353 else 1354 mem_size = 64; 1355 1356 for (i = 0; i < config->rx_ring_num; i++) { 1357 switch (i) { 1358 case 0: 1359 mem_share = (mem_size / config->rx_ring_num + 1360 mem_size % config->rx_ring_num); 1361 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share); 1362 continue; 1363 case 1: 1364 mem_share = (mem_size / config->rx_ring_num); 1365 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share); 1366 continue; 1367 case 2: 1368 mem_share = (mem_size / config->rx_ring_num); 1369 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share); 1370 continue; 1371 case 3: 1372 mem_share = (mem_size / config->rx_ring_num); 1373 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share); 1374 continue; 1375 case 4: 1376 mem_share = (mem_size / config->rx_ring_num); 1377 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share); 1378 continue; 1379 case 5: 1380 mem_share = (mem_size / config->rx_ring_num); 1381 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share); 1382 continue; 1383 case 6: 1384 mem_share = (mem_size / config->rx_ring_num); 1385 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share); 1386 continue; 1387 case 7: 1388 mem_share = (mem_size / config->rx_ring_num); 1389 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share); 1390 continue; 1391 } 1392 } 1393 writeq(val64, &bar0->rx_queue_cfg); 1394 1395 /* 1396 * Filling Tx round robin registers 1397 * as per the number of FIFOs for equal scheduling priority 1398 */ 1399 switch (config->tx_fifo_num) { 1400 case 1: 1401 val64 = 0x0; 1402 writeq(val64, &bar0->tx_w_round_robin_0); 1403 writeq(val64, &bar0->tx_w_round_robin_1); 1404 writeq(val64, &bar0->tx_w_round_robin_2); 1405 writeq(val64, &bar0->tx_w_round_robin_3); 1406 writeq(val64, &bar0->tx_w_round_robin_4); 1407 break; 1408 case 2: 1409 val64 = 0x0001000100010001ULL; 1410 writeq(val64, &bar0->tx_w_round_robin_0); 1411 writeq(val64, &bar0->tx_w_round_robin_1); 1412 writeq(val64, &bar0->tx_w_round_robin_2); 1413 writeq(val64, &bar0->tx_w_round_robin_3); 1414 val64 = 0x0001000100000000ULL; 1415 writeq(val64, &bar0->tx_w_round_robin_4); 1416 break; 1417 case 3: 1418 val64 = 0x0001020001020001ULL; 1419 writeq(val64, &bar0->tx_w_round_robin_0); 1420 val64 = 0x0200010200010200ULL; 1421 writeq(val64, &bar0->tx_w_round_robin_1); 1422 val64 = 0x0102000102000102ULL; 1423 writeq(val64, &bar0->tx_w_round_robin_2); 1424 val64 = 0x0001020001020001ULL; 1425 writeq(val64, &bar0->tx_w_round_robin_3); 1426 val64 = 0x0200010200000000ULL; 1427 writeq(val64, &bar0->tx_w_round_robin_4); 1428 break; 1429 case 4: 1430 val64 = 0x0001020300010203ULL; 1431 writeq(val64, &bar0->tx_w_round_robin_0); 1432 writeq(val64, &bar0->tx_w_round_robin_1); 1433 writeq(val64, &bar0->tx_w_round_robin_2); 1434 writeq(val64, &bar0->tx_w_round_robin_3); 1435 val64 = 0x0001020300000000ULL; 1436 writeq(val64, &bar0->tx_w_round_robin_4); 1437 break; 1438 case 5: 1439 val64 = 0x0001020304000102ULL; 1440 writeq(val64, &bar0->tx_w_round_robin_0); 1441 val64 = 0x0304000102030400ULL; 1442 writeq(val64, &bar0->tx_w_round_robin_1); 1443 val64 = 0x0102030400010203ULL; 1444 writeq(val64, &bar0->tx_w_round_robin_2); 1445 val64 = 0x0400010203040001ULL; 1446 writeq(val64, &bar0->tx_w_round_robin_3); 1447 val64 = 0x0203040000000000ULL; 1448 writeq(val64, &bar0->tx_w_round_robin_4); 1449 break; 1450 case 6: 1451 val64 = 0x0001020304050001ULL; 1452 writeq(val64, &bar0->tx_w_round_robin_0); 1453 val64 = 0x0203040500010203ULL; 1454 writeq(val64, &bar0->tx_w_round_robin_1); 1455 val64 = 0x0405000102030405ULL; 1456 writeq(val64, &bar0->tx_w_round_robin_2); 1457 val64 = 0x0001020304050001ULL; 1458 writeq(val64, &bar0->tx_w_round_robin_3); 1459 val64 = 0x0203040500000000ULL; 1460 writeq(val64, &bar0->tx_w_round_robin_4); 1461 break; 1462 case 7: 1463 val64 = 0x0001020304050600ULL; 1464 writeq(val64, &bar0->tx_w_round_robin_0); 1465 val64 = 0x0102030405060001ULL; 1466 writeq(val64, &bar0->tx_w_round_robin_1); 1467 val64 = 0x0203040506000102ULL; 1468 writeq(val64, &bar0->tx_w_round_robin_2); 1469 val64 = 0x0304050600010203ULL; 1470 writeq(val64, &bar0->tx_w_round_robin_3); 1471 val64 = 0x0405060000000000ULL; 1472 writeq(val64, &bar0->tx_w_round_robin_4); 1473 break; 1474 case 8: 1475 val64 = 0x0001020304050607ULL; 1476 writeq(val64, &bar0->tx_w_round_robin_0); 1477 writeq(val64, &bar0->tx_w_round_robin_1); 1478 writeq(val64, &bar0->tx_w_round_robin_2); 1479 writeq(val64, &bar0->tx_w_round_robin_3); 1480 val64 = 0x0001020300000000ULL; 1481 writeq(val64, &bar0->tx_w_round_robin_4); 1482 break; 1483 } 1484 1485 /* Enable all configured Tx FIFO partitions */ 1486 val64 = readq(&bar0->tx_fifo_partition_0); 1487 val64 |= (TX_FIFO_PARTITION_EN); 1488 writeq(val64, &bar0->tx_fifo_partition_0); 1489 1490 /* Filling the Rx round robin registers as per the 1491 * number of Rings and steering based on QoS with 1492 * equal priority. 1493 */ 1494 switch (config->rx_ring_num) { 1495 case 1: 1496 val64 = 0x0; 1497 writeq(val64, &bar0->rx_w_round_robin_0); 1498 writeq(val64, &bar0->rx_w_round_robin_1); 1499 writeq(val64, &bar0->rx_w_round_robin_2); 1500 writeq(val64, &bar0->rx_w_round_robin_3); 1501 writeq(val64, &bar0->rx_w_round_robin_4); 1502 1503 val64 = 0x8080808080808080ULL; 1504 writeq(val64, &bar0->rts_qos_steering); 1505 break; 1506 case 2: 1507 val64 = 0x0001000100010001ULL; 1508 writeq(val64, &bar0->rx_w_round_robin_0); 1509 writeq(val64, &bar0->rx_w_round_robin_1); 1510 writeq(val64, &bar0->rx_w_round_robin_2); 1511 writeq(val64, &bar0->rx_w_round_robin_3); 1512 val64 = 0x0001000100000000ULL; 1513 writeq(val64, &bar0->rx_w_round_robin_4); 1514 1515 val64 = 0x8080808040404040ULL; 1516 writeq(val64, &bar0->rts_qos_steering); 1517 break; 1518 case 3: 1519 val64 = 0x0001020001020001ULL; 1520 writeq(val64, &bar0->rx_w_round_robin_0); 1521 val64 = 0x0200010200010200ULL; 1522 writeq(val64, &bar0->rx_w_round_robin_1); 1523 val64 = 0x0102000102000102ULL; 1524 writeq(val64, &bar0->rx_w_round_robin_2); 1525 val64 = 0x0001020001020001ULL; 1526 writeq(val64, &bar0->rx_w_round_robin_3); 1527 val64 = 0x0200010200000000ULL; 1528 writeq(val64, &bar0->rx_w_round_robin_4); 1529 1530 val64 = 0x8080804040402020ULL; 1531 writeq(val64, &bar0->rts_qos_steering); 1532 break; 1533 case 4: 1534 val64 = 0x0001020300010203ULL; 1535 writeq(val64, &bar0->rx_w_round_robin_0); 1536 writeq(val64, &bar0->rx_w_round_robin_1); 1537 writeq(val64, &bar0->rx_w_round_robin_2); 1538 writeq(val64, &bar0->rx_w_round_robin_3); 1539 val64 = 0x0001020300000000ULL; 1540 writeq(val64, &bar0->rx_w_round_robin_4); 1541 1542 val64 = 0x8080404020201010ULL; 1543 writeq(val64, &bar0->rts_qos_steering); 1544 break; 1545 case 5: 1546 val64 = 0x0001020304000102ULL; 1547 writeq(val64, &bar0->rx_w_round_robin_0); 1548 val64 = 0x0304000102030400ULL; 1549 writeq(val64, &bar0->rx_w_round_robin_1); 1550 val64 = 0x0102030400010203ULL; 1551 writeq(val64, &bar0->rx_w_round_robin_2); 1552 val64 = 0x0400010203040001ULL; 1553 writeq(val64, &bar0->rx_w_round_robin_3); 1554 val64 = 0x0203040000000000ULL; 1555 writeq(val64, &bar0->rx_w_round_robin_4); 1556 1557 val64 = 0x8080404020201008ULL; 1558 writeq(val64, &bar0->rts_qos_steering); 1559 break; 1560 case 6: 1561 val64 = 0x0001020304050001ULL; 1562 writeq(val64, &bar0->rx_w_round_robin_0); 1563 val64 = 0x0203040500010203ULL; 1564 writeq(val64, &bar0->rx_w_round_robin_1); 1565 val64 = 0x0405000102030405ULL; 1566 writeq(val64, &bar0->rx_w_round_robin_2); 1567 val64 = 0x0001020304050001ULL; 1568 writeq(val64, &bar0->rx_w_round_robin_3); 1569 val64 = 0x0203040500000000ULL; 1570 writeq(val64, &bar0->rx_w_round_robin_4); 1571 1572 val64 = 0x8080404020100804ULL; 1573 writeq(val64, &bar0->rts_qos_steering); 1574 break; 1575 case 7: 1576 val64 = 0x0001020304050600ULL; 1577 writeq(val64, &bar0->rx_w_round_robin_0); 1578 val64 = 0x0102030405060001ULL; 1579 writeq(val64, &bar0->rx_w_round_robin_1); 1580 val64 = 0x0203040506000102ULL; 1581 writeq(val64, &bar0->rx_w_round_robin_2); 1582 val64 = 0x0304050600010203ULL; 1583 writeq(val64, &bar0->rx_w_round_robin_3); 1584 val64 = 0x0405060000000000ULL; 1585 writeq(val64, &bar0->rx_w_round_robin_4); 1586 1587 val64 = 0x8080402010080402ULL; 1588 writeq(val64, &bar0->rts_qos_steering); 1589 break; 1590 case 8: 1591 val64 = 0x0001020304050607ULL; 1592 writeq(val64, &bar0->rx_w_round_robin_0); 1593 writeq(val64, &bar0->rx_w_round_robin_1); 1594 writeq(val64, &bar0->rx_w_round_robin_2); 1595 writeq(val64, &bar0->rx_w_round_robin_3); 1596 val64 = 0x0001020300000000ULL; 1597 writeq(val64, &bar0->rx_w_round_robin_4); 1598 1599 val64 = 0x8040201008040201ULL; 1600 writeq(val64, &bar0->rts_qos_steering); 1601 break; 1602 } 1603 1604 /* UDP Fix */ 1605 val64 = 0; 1606 for (i = 0; i < 8; i++) 1607 writeq(val64, &bar0->rts_frm_len_n[i]); 1608 1609 /* Set the default rts frame length for the rings configured */ 1610 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22); 1611 for (i = 0 ; i < config->rx_ring_num ; i++) 1612 writeq(val64, &bar0->rts_frm_len_n[i]); 1613 1614 /* Set the frame length for the configured rings 1615 * desired by the user 1616 */ 1617 for (i = 0; i < config->rx_ring_num; i++) { 1618 /* If rts_frm_len[i] == 0 then it is assumed that user not 1619 * specified frame length steering. 1620 * If the user provides the frame length then program 1621 * the rts_frm_len register for those values or else 1622 * leave it as it is. 1623 */ 1624 if (rts_frm_len[i] != 0) { 1625 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]), 1626 &bar0->rts_frm_len_n[i]); 1627 } 1628 } 1629 1630 /* Disable differentiated services steering logic */ 1631 for (i = 0; i < 64; i++) { 1632 if (rts_ds_steer(nic, i, 0) == FAILURE) { 1633 DBG_PRINT(ERR_DBG, 1634 "%s: rts_ds_steer failed on codepoint %d\n", 1635 dev->name, i); 1636 return -ENODEV; 1637 } 1638 } 1639 1640 /* Program statistics memory */ 1641 writeq(mac_control->stats_mem_phy, &bar0->stat_addr); 1642 1643 if (nic->device_type == XFRAME_II_DEVICE) { 1644 val64 = STAT_BC(0x320); 1645 writeq(val64, &bar0->stat_byte_cnt); 1646 } 1647 1648 /* 1649 * Initializing the sampling rate for the device to calculate the 1650 * bandwidth utilization. 1651 */ 1652 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) | 1653 MAC_RX_LINK_UTIL_VAL(rmac_util_period); 1654 writeq(val64, &bar0->mac_link_util); 1655 1656 /* 1657 * Initializing the Transmit and Receive Traffic Interrupt 1658 * Scheme. 1659 */ 1660 1661 /* Initialize TTI */ 1662 if (SUCCESS != init_tti(nic, nic->last_link_state, true)) 1663 return -ENODEV; 1664 1665 /* RTI Initialization */ 1666 if (nic->device_type == XFRAME_II_DEVICE) { 1667 /* 1668 * Programmed to generate Apprx 500 Intrs per 1669 * second 1670 */ 1671 int count = (nic->config.bus_speed * 125)/4; 1672 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count); 1673 } else 1674 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF); 1675 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) | 1676 RTI_DATA1_MEM_RX_URNG_B(0x10) | 1677 RTI_DATA1_MEM_RX_URNG_C(0x30) | 1678 RTI_DATA1_MEM_RX_TIMER_AC_EN; 1679 1680 writeq(val64, &bar0->rti_data1_mem); 1681 1682 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) | 1683 RTI_DATA2_MEM_RX_UFC_B(0x2) ; 1684 if (nic->config.intr_type == MSI_X) 1685 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | 1686 RTI_DATA2_MEM_RX_UFC_D(0x40)); 1687 else 1688 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | 1689 RTI_DATA2_MEM_RX_UFC_D(0x80)); 1690 writeq(val64, &bar0->rti_data2_mem); 1691 1692 for (i = 0; i < config->rx_ring_num; i++) { 1693 val64 = RTI_CMD_MEM_WE | 1694 RTI_CMD_MEM_STROBE_NEW_CMD | 1695 RTI_CMD_MEM_OFFSET(i); 1696 writeq(val64, &bar0->rti_command_mem); 1697 1698 /* 1699 * Once the operation completes, the Strobe bit of the 1700 * command register will be reset. We poll for this 1701 * particular condition. We wait for a maximum of 500ms 1702 * for the operation to complete, if it's not complete 1703 * by then we return error. 1704 */ 1705 time = 0; 1706 while (true) { 1707 val64 = readq(&bar0->rti_command_mem); 1708 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) 1709 break; 1710 1711 if (time > 10) { 1712 DBG_PRINT(ERR_DBG, "%s: RTI init failed\n", 1713 dev->name); 1714 return -ENODEV; 1715 } 1716 time++; 1717 msleep(50); 1718 } 1719 } 1720 1721 /* 1722 * Initializing proper values as Pause threshold into all 1723 * the 8 Queues on Rx side. 1724 */ 1725 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3); 1726 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7); 1727 1728 /* Disable RMAC PAD STRIPPING */ 1729 add = &bar0->mac_cfg; 1730 val64 = readq(&bar0->mac_cfg); 1731 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD); 1732 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 1733 writel((u32) (val64), add); 1734 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 1735 writel((u32) (val64 >> 32), (add + 4)); 1736 val64 = readq(&bar0->mac_cfg); 1737 1738 /* Enable FCS stripping by adapter */ 1739 add = &bar0->mac_cfg; 1740 val64 = readq(&bar0->mac_cfg); 1741 val64 |= MAC_CFG_RMAC_STRIP_FCS; 1742 if (nic->device_type == XFRAME_II_DEVICE) 1743 writeq(val64, &bar0->mac_cfg); 1744 else { 1745 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 1746 writel((u32) (val64), add); 1747 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 1748 writel((u32) (val64 >> 32), (add + 4)); 1749 } 1750 1751 /* 1752 * Set the time value to be inserted in the pause frame 1753 * generated by xena. 1754 */ 1755 val64 = readq(&bar0->rmac_pause_cfg); 1756 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff)); 1757 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time); 1758 writeq(val64, &bar0->rmac_pause_cfg); 1759 1760 /* 1761 * Set the Threshold Limit for Generating the pause frame 1762 * If the amount of data in any Queue exceeds ratio of 1763 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256 1764 * pause frame is generated 1765 */ 1766 val64 = 0; 1767 for (i = 0; i < 4; i++) { 1768 val64 |= (((u64)0xFF00 | 1769 nic->mac_control.mc_pause_threshold_q0q3) 1770 << (i * 2 * 8)); 1771 } 1772 writeq(val64, &bar0->mc_pause_thresh_q0q3); 1773 1774 val64 = 0; 1775 for (i = 0; i < 4; i++) { 1776 val64 |= (((u64)0xFF00 | 1777 nic->mac_control.mc_pause_threshold_q4q7) 1778 << (i * 2 * 8)); 1779 } 1780 writeq(val64, &bar0->mc_pause_thresh_q4q7); 1781 1782 /* 1783 * TxDMA will stop Read request if the number of read split has 1784 * exceeded the limit pointed by shared_splits 1785 */ 1786 val64 = readq(&bar0->pic_control); 1787 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits); 1788 writeq(val64, &bar0->pic_control); 1789 1790 if (nic->config.bus_speed == 266) { 1791 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout); 1792 writeq(0x0, &bar0->read_retry_delay); 1793 writeq(0x0, &bar0->write_retry_delay); 1794 } 1795 1796 /* 1797 * Programming the Herc to split every write transaction 1798 * that does not start on an ADB to reduce disconnects. 1799 */ 1800 if (nic->device_type == XFRAME_II_DEVICE) { 1801 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN | 1802 MISC_LINK_STABILITY_PRD(3); 1803 writeq(val64, &bar0->misc_control); 1804 val64 = readq(&bar0->pic_control2); 1805 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15)); 1806 writeq(val64, &bar0->pic_control2); 1807 } 1808 if (strstr(nic->product_name, "CX4")) { 1809 val64 = TMAC_AVG_IPG(0x17); 1810 writeq(val64, &bar0->tmac_avg_ipg); 1811 } 1812 1813 return SUCCESS; 1814 } 1815 #define LINK_UP_DOWN_INTERRUPT 1 1816 #define MAC_RMAC_ERR_TIMER 2 1817 1818 static int s2io_link_fault_indication(struct s2io_nic *nic) 1819 { 1820 if (nic->device_type == XFRAME_II_DEVICE) 1821 return LINK_UP_DOWN_INTERRUPT; 1822 else 1823 return MAC_RMAC_ERR_TIMER; 1824 } 1825 1826 /** 1827 * do_s2io_write_bits - update alarm bits in alarm register 1828 * @value: alarm bits 1829 * @flag: interrupt status 1830 * @addr: address value 1831 * Description: update alarm bits in alarm register 1832 * Return Value: 1833 * NONE. 1834 */ 1835 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr) 1836 { 1837 u64 temp64; 1838 1839 temp64 = readq(addr); 1840 1841 if (flag == ENABLE_INTRS) 1842 temp64 &= ~((u64)value); 1843 else 1844 temp64 |= ((u64)value); 1845 writeq(temp64, addr); 1846 } 1847 1848 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag) 1849 { 1850 struct XENA_dev_config __iomem *bar0 = nic->bar0; 1851 register u64 gen_int_mask = 0; 1852 u64 interruptible; 1853 1854 writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask); 1855 if (mask & TX_DMA_INTR) { 1856 gen_int_mask |= TXDMA_INT_M; 1857 1858 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT | 1859 TXDMA_PCC_INT | TXDMA_TTI_INT | 1860 TXDMA_LSO_INT | TXDMA_TPA_INT | 1861 TXDMA_SM_INT, flag, &bar0->txdma_int_mask); 1862 1863 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM | 1864 PFC_MISC_0_ERR | PFC_MISC_1_ERR | 1865 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag, 1866 &bar0->pfc_err_mask); 1867 1868 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM | 1869 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR | 1870 TDA_PCIX_ERR, flag, &bar0->tda_err_mask); 1871 1872 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR | 1873 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM | 1874 PCC_N_SERR | PCC_6_COF_OV_ERR | 1875 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR | 1876 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR | 1877 PCC_TXB_ECC_SG_ERR, 1878 flag, &bar0->pcc_err_mask); 1879 1880 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR | 1881 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask); 1882 1883 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT | 1884 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM | 1885 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW, 1886 flag, &bar0->lso_err_mask); 1887 1888 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP, 1889 flag, &bar0->tpa_err_mask); 1890 1891 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask); 1892 } 1893 1894 if (mask & TX_MAC_INTR) { 1895 gen_int_mask |= TXMAC_INT_M; 1896 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag, 1897 &bar0->mac_int_mask); 1898 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR | 1899 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR | 1900 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR, 1901 flag, &bar0->mac_tmac_err_mask); 1902 } 1903 1904 if (mask & TX_XGXS_INTR) { 1905 gen_int_mask |= TXXGXS_INT_M; 1906 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag, 1907 &bar0->xgxs_int_mask); 1908 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR | 1909 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR, 1910 flag, &bar0->xgxs_txgxs_err_mask); 1911 } 1912 1913 if (mask & RX_DMA_INTR) { 1914 gen_int_mask |= RXDMA_INT_M; 1915 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M | 1916 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M, 1917 flag, &bar0->rxdma_int_mask); 1918 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR | 1919 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM | 1920 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR | 1921 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask); 1922 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn | 1923 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn | 1924 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag, 1925 &bar0->prc_pcix_err_mask); 1926 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR | 1927 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag, 1928 &bar0->rpa_err_mask); 1929 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR | 1930 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM | 1931 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR | 1932 RDA_FRM_ECC_SG_ERR | 1933 RDA_MISC_ERR|RDA_PCIX_ERR, 1934 flag, &bar0->rda_err_mask); 1935 do_s2io_write_bits(RTI_SM_ERR_ALARM | 1936 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR, 1937 flag, &bar0->rti_err_mask); 1938 } 1939 1940 if (mask & RX_MAC_INTR) { 1941 gen_int_mask |= RXMAC_INT_M; 1942 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag, 1943 &bar0->mac_int_mask); 1944 interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR | 1945 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR | 1946 RMAC_DOUBLE_ECC_ERR); 1947 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) 1948 interruptible |= RMAC_LINK_STATE_CHANGE_INT; 1949 do_s2io_write_bits(interruptible, 1950 flag, &bar0->mac_rmac_err_mask); 1951 } 1952 1953 if (mask & RX_XGXS_INTR) { 1954 gen_int_mask |= RXXGXS_INT_M; 1955 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag, 1956 &bar0->xgxs_int_mask); 1957 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag, 1958 &bar0->xgxs_rxgxs_err_mask); 1959 } 1960 1961 if (mask & MC_INTR) { 1962 gen_int_mask |= MC_INT_M; 1963 do_s2io_write_bits(MC_INT_MASK_MC_INT, 1964 flag, &bar0->mc_int_mask); 1965 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG | 1966 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag, 1967 &bar0->mc_err_mask); 1968 } 1969 nic->general_int_mask = gen_int_mask; 1970 1971 /* Remove this line when alarm interrupts are enabled */ 1972 nic->general_int_mask = 0; 1973 } 1974 1975 /** 1976 * en_dis_able_nic_intrs - Enable or Disable the interrupts 1977 * @nic: device private variable, 1978 * @mask: A mask indicating which Intr block must be modified and, 1979 * @flag: A flag indicating whether to enable or disable the Intrs. 1980 * Description: This function will either disable or enable the interrupts 1981 * depending on the flag argument. The mask argument can be used to 1982 * enable/disable any Intr block. 1983 * Return Value: NONE. 1984 */ 1985 1986 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) 1987 { 1988 struct XENA_dev_config __iomem *bar0 = nic->bar0; 1989 register u64 temp64 = 0, intr_mask = 0; 1990 1991 intr_mask = nic->general_int_mask; 1992 1993 /* Top level interrupt classification */ 1994 /* PIC Interrupts */ 1995 if (mask & TX_PIC_INTR) { 1996 /* Enable PIC Intrs in the general intr mask register */ 1997 intr_mask |= TXPIC_INT_M; 1998 if (flag == ENABLE_INTRS) { 1999 /* 2000 * If Hercules adapter enable GPIO otherwise 2001 * disable all PCIX, Flash, MDIO, IIC and GPIO 2002 * interrupts for now. 2003 * TODO 2004 */ 2005 if (s2io_link_fault_indication(nic) == 2006 LINK_UP_DOWN_INTERRUPT) { 2007 do_s2io_write_bits(PIC_INT_GPIO, flag, 2008 &bar0->pic_int_mask); 2009 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag, 2010 &bar0->gpio_int_mask); 2011 } else 2012 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); 2013 } else if (flag == DISABLE_INTRS) { 2014 /* 2015 * Disable PIC Intrs in the general 2016 * intr mask register 2017 */ 2018 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); 2019 } 2020 } 2021 2022 /* Tx traffic interrupts */ 2023 if (mask & TX_TRAFFIC_INTR) { 2024 intr_mask |= TXTRAFFIC_INT_M; 2025 if (flag == ENABLE_INTRS) { 2026 /* 2027 * Enable all the Tx side interrupts 2028 * writing 0 Enables all 64 TX interrupt levels 2029 */ 2030 writeq(0x0, &bar0->tx_traffic_mask); 2031 } else if (flag == DISABLE_INTRS) { 2032 /* 2033 * Disable Tx Traffic Intrs in the general intr mask 2034 * register. 2035 */ 2036 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask); 2037 } 2038 } 2039 2040 /* Rx traffic interrupts */ 2041 if (mask & RX_TRAFFIC_INTR) { 2042 intr_mask |= RXTRAFFIC_INT_M; 2043 if (flag == ENABLE_INTRS) { 2044 /* writing 0 Enables all 8 RX interrupt levels */ 2045 writeq(0x0, &bar0->rx_traffic_mask); 2046 } else if (flag == DISABLE_INTRS) { 2047 /* 2048 * Disable Rx Traffic Intrs in the general intr mask 2049 * register. 2050 */ 2051 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask); 2052 } 2053 } 2054 2055 temp64 = readq(&bar0->general_int_mask); 2056 if (flag == ENABLE_INTRS) 2057 temp64 &= ~((u64)intr_mask); 2058 else 2059 temp64 = DISABLE_ALL_INTRS; 2060 writeq(temp64, &bar0->general_int_mask); 2061 2062 nic->general_int_mask = readq(&bar0->general_int_mask); 2063 } 2064 2065 /** 2066 * verify_pcc_quiescent- Checks for PCC quiescent state 2067 * @sp : private member of the device structure, which is a pointer to the 2068 * s2io_nic structure. 2069 * @flag: boolean controlling function path 2070 * Return: 1 If PCC is quiescence 2071 * 0 If PCC is not quiescence 2072 */ 2073 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag) 2074 { 2075 int ret = 0, herc; 2076 struct XENA_dev_config __iomem *bar0 = sp->bar0; 2077 u64 val64 = readq(&bar0->adapter_status); 2078 2079 herc = (sp->device_type == XFRAME_II_DEVICE); 2080 2081 if (flag == false) { 2082 if ((!herc && (sp->pdev->revision >= 4)) || herc) { 2083 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE)) 2084 ret = 1; 2085 } else { 2086 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE)) 2087 ret = 1; 2088 } 2089 } else { 2090 if ((!herc && (sp->pdev->revision >= 4)) || herc) { 2091 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) == 2092 ADAPTER_STATUS_RMAC_PCC_IDLE)) 2093 ret = 1; 2094 } else { 2095 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) == 2096 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE)) 2097 ret = 1; 2098 } 2099 } 2100 2101 return ret; 2102 } 2103 /** 2104 * verify_xena_quiescence - Checks whether the H/W is ready 2105 * @sp : private member of the device structure, which is a pointer to the 2106 * s2io_nic structure. 2107 * Description: Returns whether the H/W is ready to go or not. Depending 2108 * on whether adapter enable bit was written or not the comparison 2109 * differs and the calling function passes the input argument flag to 2110 * indicate this. 2111 * Return: 1 If xena is quiescence 2112 * 0 If Xena is not quiescence 2113 */ 2114 2115 static int verify_xena_quiescence(struct s2io_nic *sp) 2116 { 2117 int mode; 2118 struct XENA_dev_config __iomem *bar0 = sp->bar0; 2119 u64 val64 = readq(&bar0->adapter_status); 2120 mode = s2io_verify_pci_mode(sp); 2121 2122 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) { 2123 DBG_PRINT(ERR_DBG, "TDMA is not ready!\n"); 2124 return 0; 2125 } 2126 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) { 2127 DBG_PRINT(ERR_DBG, "RDMA is not ready!\n"); 2128 return 0; 2129 } 2130 if (!(val64 & ADAPTER_STATUS_PFC_READY)) { 2131 DBG_PRINT(ERR_DBG, "PFC is not ready!\n"); 2132 return 0; 2133 } 2134 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) { 2135 DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n"); 2136 return 0; 2137 } 2138 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) { 2139 DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n"); 2140 return 0; 2141 } 2142 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) { 2143 DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n"); 2144 return 0; 2145 } 2146 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) { 2147 DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n"); 2148 return 0; 2149 } 2150 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) { 2151 DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n"); 2152 return 0; 2153 } 2154 2155 /* 2156 * In PCI 33 mode, the P_PLL is not used, and therefore, 2157 * the the P_PLL_LOCK bit in the adapter_status register will 2158 * not be asserted. 2159 */ 2160 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) && 2161 sp->device_type == XFRAME_II_DEVICE && 2162 mode != PCI_MODE_PCI_33) { 2163 DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n"); 2164 return 0; 2165 } 2166 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) == 2167 ADAPTER_STATUS_RC_PRC_QUIESCENT)) { 2168 DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n"); 2169 return 0; 2170 } 2171 return 1; 2172 } 2173 2174 /** 2175 * fix_mac_address - Fix for Mac addr problem on Alpha platforms 2176 * @sp: Pointer to device specifc structure 2177 * Description : 2178 * New procedure to clear mac address reading problems on Alpha platforms 2179 * 2180 */ 2181 2182 static void fix_mac_address(struct s2io_nic *sp) 2183 { 2184 struct XENA_dev_config __iomem *bar0 = sp->bar0; 2185 int i = 0; 2186 2187 while (fix_mac[i] != END_SIGN) { 2188 writeq(fix_mac[i++], &bar0->gpio_control); 2189 udelay(10); 2190 (void) readq(&bar0->gpio_control); 2191 } 2192 } 2193 2194 /** 2195 * start_nic - Turns the device on 2196 * @nic : device private variable. 2197 * Description: 2198 * This function actually turns the device on. Before this function is 2199 * called,all Registers are configured from their reset states 2200 * and shared memory is allocated but the NIC is still quiescent. On 2201 * calling this function, the device interrupts are cleared and the NIC is 2202 * literally switched on by writing into the adapter control register. 2203 * Return Value: 2204 * SUCCESS on success and -1 on failure. 2205 */ 2206 2207 static int start_nic(struct s2io_nic *nic) 2208 { 2209 struct XENA_dev_config __iomem *bar0 = nic->bar0; 2210 struct net_device *dev = nic->dev; 2211 register u64 val64 = 0; 2212 u16 subid, i; 2213 struct config_param *config = &nic->config; 2214 struct mac_info *mac_control = &nic->mac_control; 2215 2216 /* PRC Initialization and configuration */ 2217 for (i = 0; i < config->rx_ring_num; i++) { 2218 struct ring_info *ring = &mac_control->rings[i]; 2219 2220 writeq((u64)ring->rx_blocks[0].block_dma_addr, 2221 &bar0->prc_rxd0_n[i]); 2222 2223 val64 = readq(&bar0->prc_ctrl_n[i]); 2224 if (nic->rxd_mode == RXD_MODE_1) 2225 val64 |= PRC_CTRL_RC_ENABLED; 2226 else 2227 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3; 2228 if (nic->device_type == XFRAME_II_DEVICE) 2229 val64 |= PRC_CTRL_GROUP_READS; 2230 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF); 2231 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000); 2232 writeq(val64, &bar0->prc_ctrl_n[i]); 2233 } 2234 2235 if (nic->rxd_mode == RXD_MODE_3B) { 2236 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */ 2237 val64 = readq(&bar0->rx_pa_cfg); 2238 val64 |= RX_PA_CFG_IGNORE_L2_ERR; 2239 writeq(val64, &bar0->rx_pa_cfg); 2240 } 2241 2242 if (vlan_tag_strip == 0) { 2243 val64 = readq(&bar0->rx_pa_cfg); 2244 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG; 2245 writeq(val64, &bar0->rx_pa_cfg); 2246 nic->vlan_strip_flag = 0; 2247 } 2248 2249 /* 2250 * Enabling MC-RLDRAM. After enabling the device, we timeout 2251 * for around 100ms, which is approximately the time required 2252 * for the device to be ready for operation. 2253 */ 2254 val64 = readq(&bar0->mc_rldram_mrs); 2255 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE; 2256 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF); 2257 val64 = readq(&bar0->mc_rldram_mrs); 2258 2259 msleep(100); /* Delay by around 100 ms. */ 2260 2261 /* Enabling ECC Protection. */ 2262 val64 = readq(&bar0->adapter_control); 2263 val64 &= ~ADAPTER_ECC_EN; 2264 writeq(val64, &bar0->adapter_control); 2265 2266 /* 2267 * Verify if the device is ready to be enabled, if so enable 2268 * it. 2269 */ 2270 val64 = readq(&bar0->adapter_status); 2271 if (!verify_xena_quiescence(nic)) { 2272 DBG_PRINT(ERR_DBG, "%s: device is not ready, " 2273 "Adapter status reads: 0x%llx\n", 2274 dev->name, (unsigned long long)val64); 2275 return FAILURE; 2276 } 2277 2278 /* 2279 * With some switches, link might be already up at this point. 2280 * Because of this weird behavior, when we enable laser, 2281 * we may not get link. We need to handle this. We cannot 2282 * figure out which switch is misbehaving. So we are forced to 2283 * make a global change. 2284 */ 2285 2286 /* Enabling Laser. */ 2287 val64 = readq(&bar0->adapter_control); 2288 val64 |= ADAPTER_EOI_TX_ON; 2289 writeq(val64, &bar0->adapter_control); 2290 2291 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) { 2292 /* 2293 * Dont see link state interrupts initially on some switches, 2294 * so directly scheduling the link state task here. 2295 */ 2296 schedule_work(&nic->set_link_task); 2297 } 2298 /* SXE-002: Initialize link and activity LED */ 2299 subid = nic->pdev->subsystem_device; 2300 if (((subid & 0xFF) >= 0x07) && 2301 (nic->device_type == XFRAME_I_DEVICE)) { 2302 val64 = readq(&bar0->gpio_control); 2303 val64 |= 0x0000800000000000ULL; 2304 writeq(val64, &bar0->gpio_control); 2305 val64 = 0x0411040400000000ULL; 2306 writeq(val64, (void __iomem *)bar0 + 0x2700); 2307 } 2308 2309 return SUCCESS; 2310 } 2311 /** 2312 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb 2313 * @fifo_data: fifo data pointer 2314 * @txdlp: descriptor 2315 * @get_off: unused 2316 */ 2317 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, 2318 struct TxD *txdlp, int get_off) 2319 { 2320 struct s2io_nic *nic = fifo_data->nic; 2321 struct sk_buff *skb; 2322 struct TxD *txds; 2323 u16 j, frg_cnt; 2324 2325 txds = txdlp; 2326 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) { 2327 dma_unmap_single(&nic->pdev->dev, 2328 (dma_addr_t)txds->Buffer_Pointer, 2329 sizeof(u64), DMA_TO_DEVICE); 2330 txds++; 2331 } 2332 2333 skb = (struct sk_buff *)((unsigned long)txds->Host_Control); 2334 if (!skb) { 2335 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); 2336 return NULL; 2337 } 2338 dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer, 2339 skb_headlen(skb), DMA_TO_DEVICE); 2340 frg_cnt = skb_shinfo(skb)->nr_frags; 2341 if (frg_cnt) { 2342 txds++; 2343 for (j = 0; j < frg_cnt; j++, txds++) { 2344 const skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 2345 if (!txds->Buffer_Pointer) 2346 break; 2347 dma_unmap_page(&nic->pdev->dev, 2348 (dma_addr_t)txds->Buffer_Pointer, 2349 skb_frag_size(frag), DMA_TO_DEVICE); 2350 } 2351 } 2352 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); 2353 return skb; 2354 } 2355 2356 /** 2357 * free_tx_buffers - Free all queued Tx buffers 2358 * @nic : device private variable. 2359 * Description: 2360 * Free all queued Tx buffers. 2361 * Return Value: void 2362 */ 2363 2364 static void free_tx_buffers(struct s2io_nic *nic) 2365 { 2366 struct net_device *dev = nic->dev; 2367 struct sk_buff *skb; 2368 struct TxD *txdp; 2369 int i, j; 2370 int cnt = 0; 2371 struct config_param *config = &nic->config; 2372 struct mac_info *mac_control = &nic->mac_control; 2373 struct stat_block *stats = mac_control->stats_info; 2374 struct swStat *swstats = &stats->sw_stat; 2375 2376 for (i = 0; i < config->tx_fifo_num; i++) { 2377 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; 2378 struct fifo_info *fifo = &mac_control->fifos[i]; 2379 unsigned long flags; 2380 2381 spin_lock_irqsave(&fifo->tx_lock, flags); 2382 for (j = 0; j < tx_cfg->fifo_len; j++) { 2383 txdp = fifo->list_info[j].list_virt_addr; 2384 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j); 2385 if (skb) { 2386 swstats->mem_freed += skb->truesize; 2387 dev_kfree_skb(skb); 2388 cnt++; 2389 } 2390 } 2391 DBG_PRINT(INTR_DBG, 2392 "%s: forcibly freeing %d skbs on FIFO%d\n", 2393 dev->name, cnt, i); 2394 fifo->tx_curr_get_info.offset = 0; 2395 fifo->tx_curr_put_info.offset = 0; 2396 spin_unlock_irqrestore(&fifo->tx_lock, flags); 2397 } 2398 } 2399 2400 /** 2401 * stop_nic - To stop the nic 2402 * @nic : device private variable. 2403 * Description: 2404 * This function does exactly the opposite of what the start_nic() 2405 * function does. This function is called to stop the device. 2406 * Return Value: 2407 * void. 2408 */ 2409 2410 static void stop_nic(struct s2io_nic *nic) 2411 { 2412 struct XENA_dev_config __iomem *bar0 = nic->bar0; 2413 register u64 val64 = 0; 2414 u16 interruptible; 2415 2416 /* Disable all interrupts */ 2417 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS); 2418 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; 2419 interruptible |= TX_PIC_INTR; 2420 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS); 2421 2422 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */ 2423 val64 = readq(&bar0->adapter_control); 2424 val64 &= ~(ADAPTER_CNTL_EN); 2425 writeq(val64, &bar0->adapter_control); 2426 } 2427 2428 /** 2429 * fill_rx_buffers - Allocates the Rx side skbs 2430 * @nic : device private variable. 2431 * @ring: per ring structure 2432 * @from_card_up: If this is true, we will map the buffer to get 2433 * the dma address for buf0 and buf1 to give it to the card. 2434 * Else we will sync the already mapped buffer to give it to the card. 2435 * Description: 2436 * The function allocates Rx side skbs and puts the physical 2437 * address of these buffers into the RxD buffer pointers, so that the NIC 2438 * can DMA the received frame into these locations. 2439 * The NIC supports 3 receive modes, viz 2440 * 1. single buffer, 2441 * 2. three buffer and 2442 * 3. Five buffer modes. 2443 * Each mode defines how many fragments the received frame will be split 2444 * up into by the NIC. The frame is split into L3 header, L4 Header, 2445 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself 2446 * is split into 3 fragments. As of now only single buffer mode is 2447 * supported. 2448 * Return Value: 2449 * SUCCESS on success or an appropriate -ve value on failure. 2450 */ 2451 static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, 2452 int from_card_up) 2453 { 2454 struct sk_buff *skb; 2455 struct RxD_t *rxdp; 2456 int off, size, block_no, block_no1; 2457 u32 alloc_tab = 0; 2458 u32 alloc_cnt; 2459 u64 tmp; 2460 struct buffAdd *ba; 2461 struct RxD_t *first_rxdp = NULL; 2462 u64 Buffer0_ptr = 0, Buffer1_ptr = 0; 2463 struct RxD1 *rxdp1; 2464 struct RxD3 *rxdp3; 2465 struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat; 2466 2467 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left; 2468 2469 block_no1 = ring->rx_curr_get_info.block_index; 2470 while (alloc_tab < alloc_cnt) { 2471 block_no = ring->rx_curr_put_info.block_index; 2472 2473 off = ring->rx_curr_put_info.offset; 2474 2475 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr; 2476 2477 if ((block_no == block_no1) && 2478 (off == ring->rx_curr_get_info.offset) && 2479 (rxdp->Host_Control)) { 2480 DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n", 2481 ring->dev->name); 2482 goto end; 2483 } 2484 if (off && (off == ring->rxd_count)) { 2485 ring->rx_curr_put_info.block_index++; 2486 if (ring->rx_curr_put_info.block_index == 2487 ring->block_count) 2488 ring->rx_curr_put_info.block_index = 0; 2489 block_no = ring->rx_curr_put_info.block_index; 2490 off = 0; 2491 ring->rx_curr_put_info.offset = off; 2492 rxdp = ring->rx_blocks[block_no].block_virt_addr; 2493 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", 2494 ring->dev->name, rxdp); 2495 2496 } 2497 2498 if ((rxdp->Control_1 & RXD_OWN_XENA) && 2499 ((ring->rxd_mode == RXD_MODE_3B) && 2500 (rxdp->Control_2 & s2BIT(0)))) { 2501 ring->rx_curr_put_info.offset = off; 2502 goto end; 2503 } 2504 /* calculate size of skb based on ring mode */ 2505 size = ring->mtu + 2506 HEADER_ETHERNET_II_802_3_SIZE + 2507 HEADER_802_2_SIZE + HEADER_SNAP_SIZE; 2508 if (ring->rxd_mode == RXD_MODE_1) 2509 size += NET_IP_ALIGN; 2510 else 2511 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4; 2512 2513 /* allocate skb */ 2514 skb = netdev_alloc_skb(nic->dev, size); 2515 if (!skb) { 2516 DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n", 2517 ring->dev->name); 2518 if (first_rxdp) { 2519 dma_wmb(); 2520 first_rxdp->Control_1 |= RXD_OWN_XENA; 2521 } 2522 swstats->mem_alloc_fail_cnt++; 2523 2524 return -ENOMEM ; 2525 } 2526 swstats->mem_allocated += skb->truesize; 2527 2528 if (ring->rxd_mode == RXD_MODE_1) { 2529 /* 1 buffer mode - normal operation mode */ 2530 rxdp1 = (struct RxD1 *)rxdp; 2531 memset(rxdp, 0, sizeof(struct RxD1)); 2532 skb_reserve(skb, NET_IP_ALIGN); 2533 rxdp1->Buffer0_ptr = 2534 dma_map_single(&ring->pdev->dev, skb->data, 2535 size - NET_IP_ALIGN, 2536 DMA_FROM_DEVICE); 2537 if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr)) 2538 goto pci_map_failed; 2539 2540 rxdp->Control_2 = 2541 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); 2542 rxdp->Host_Control = (unsigned long)skb; 2543 } else if (ring->rxd_mode == RXD_MODE_3B) { 2544 /* 2545 * 2 buffer mode - 2546 * 2 buffer mode provides 128 2547 * byte aligned receive buffers. 2548 */ 2549 2550 rxdp3 = (struct RxD3 *)rxdp; 2551 /* save buffer pointers to avoid frequent dma mapping */ 2552 Buffer0_ptr = rxdp3->Buffer0_ptr; 2553 Buffer1_ptr = rxdp3->Buffer1_ptr; 2554 memset(rxdp, 0, sizeof(struct RxD3)); 2555 /* restore the buffer pointers for dma sync*/ 2556 rxdp3->Buffer0_ptr = Buffer0_ptr; 2557 rxdp3->Buffer1_ptr = Buffer1_ptr; 2558 2559 ba = &ring->ba[block_no][off]; 2560 skb_reserve(skb, BUF0_LEN); 2561 tmp = (u64)(unsigned long)skb->data; 2562 tmp += ALIGN_SIZE; 2563 tmp &= ~ALIGN_SIZE; 2564 skb->data = (void *) (unsigned long)tmp; 2565 skb_reset_tail_pointer(skb); 2566 2567 if (from_card_up) { 2568 rxdp3->Buffer0_ptr = 2569 dma_map_single(&ring->pdev->dev, 2570 ba->ba_0, BUF0_LEN, 2571 DMA_FROM_DEVICE); 2572 if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr)) 2573 goto pci_map_failed; 2574 } else 2575 dma_sync_single_for_device(&ring->pdev->dev, 2576 (dma_addr_t)rxdp3->Buffer0_ptr, 2577 BUF0_LEN, 2578 DMA_FROM_DEVICE); 2579 2580 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 2581 if (ring->rxd_mode == RXD_MODE_3B) { 2582 /* Two buffer mode */ 2583 2584 /* 2585 * Buffer2 will have L3/L4 header plus 2586 * L4 payload 2587 */ 2588 rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev, 2589 skb->data, 2590 ring->mtu + 4, 2591 DMA_FROM_DEVICE); 2592 2593 if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr)) 2594 goto pci_map_failed; 2595 2596 if (from_card_up) { 2597 rxdp3->Buffer1_ptr = 2598 dma_map_single(&ring->pdev->dev, 2599 ba->ba_1, 2600 BUF1_LEN, 2601 DMA_FROM_DEVICE); 2602 2603 if (dma_mapping_error(&nic->pdev->dev, 2604 rxdp3->Buffer1_ptr)) { 2605 dma_unmap_single(&ring->pdev->dev, 2606 (dma_addr_t)(unsigned long) 2607 skb->data, 2608 ring->mtu + 4, 2609 DMA_FROM_DEVICE); 2610 goto pci_map_failed; 2611 } 2612 } 2613 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); 2614 rxdp->Control_2 |= SET_BUFFER2_SIZE_3 2615 (ring->mtu + 4); 2616 } 2617 rxdp->Control_2 |= s2BIT(0); 2618 rxdp->Host_Control = (unsigned long) (skb); 2619 } 2620 if (alloc_tab & ((1 << rxsync_frequency) - 1)) 2621 rxdp->Control_1 |= RXD_OWN_XENA; 2622 off++; 2623 if (off == (ring->rxd_count + 1)) 2624 off = 0; 2625 ring->rx_curr_put_info.offset = off; 2626 2627 rxdp->Control_2 |= SET_RXD_MARKER; 2628 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { 2629 if (first_rxdp) { 2630 dma_wmb(); 2631 first_rxdp->Control_1 |= RXD_OWN_XENA; 2632 } 2633 first_rxdp = rxdp; 2634 } 2635 ring->rx_bufs_left += 1; 2636 alloc_tab++; 2637 } 2638 2639 end: 2640 /* Transfer ownership of first descriptor to adapter just before 2641 * exiting. Before that, use memory barrier so that ownership 2642 * and other fields are seen by adapter correctly. 2643 */ 2644 if (first_rxdp) { 2645 dma_wmb(); 2646 first_rxdp->Control_1 |= RXD_OWN_XENA; 2647 } 2648 2649 return SUCCESS; 2650 2651 pci_map_failed: 2652 swstats->pci_map_fail_cnt++; 2653 swstats->mem_freed += skb->truesize; 2654 dev_kfree_skb_irq(skb); 2655 return -ENOMEM; 2656 } 2657 2658 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk) 2659 { 2660 struct net_device *dev = sp->dev; 2661 int j; 2662 struct sk_buff *skb; 2663 struct RxD_t *rxdp; 2664 struct RxD1 *rxdp1; 2665 struct RxD3 *rxdp3; 2666 struct mac_info *mac_control = &sp->mac_control; 2667 struct stat_block *stats = mac_control->stats_info; 2668 struct swStat *swstats = &stats->sw_stat; 2669 2670 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) { 2671 rxdp = mac_control->rings[ring_no]. 2672 rx_blocks[blk].rxds[j].virt_addr; 2673 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control); 2674 if (!skb) 2675 continue; 2676 if (sp->rxd_mode == RXD_MODE_1) { 2677 rxdp1 = (struct RxD1 *)rxdp; 2678 dma_unmap_single(&sp->pdev->dev, 2679 (dma_addr_t)rxdp1->Buffer0_ptr, 2680 dev->mtu + 2681 HEADER_ETHERNET_II_802_3_SIZE + 2682 HEADER_802_2_SIZE + HEADER_SNAP_SIZE, 2683 DMA_FROM_DEVICE); 2684 memset(rxdp, 0, sizeof(struct RxD1)); 2685 } else if (sp->rxd_mode == RXD_MODE_3B) { 2686 rxdp3 = (struct RxD3 *)rxdp; 2687 dma_unmap_single(&sp->pdev->dev, 2688 (dma_addr_t)rxdp3->Buffer0_ptr, 2689 BUF0_LEN, DMA_FROM_DEVICE); 2690 dma_unmap_single(&sp->pdev->dev, 2691 (dma_addr_t)rxdp3->Buffer1_ptr, 2692 BUF1_LEN, DMA_FROM_DEVICE); 2693 dma_unmap_single(&sp->pdev->dev, 2694 (dma_addr_t)rxdp3->Buffer2_ptr, 2695 dev->mtu + 4, DMA_FROM_DEVICE); 2696 memset(rxdp, 0, sizeof(struct RxD3)); 2697 } 2698 swstats->mem_freed += skb->truesize; 2699 dev_kfree_skb(skb); 2700 mac_control->rings[ring_no].rx_bufs_left -= 1; 2701 } 2702 } 2703 2704 /** 2705 * free_rx_buffers - Frees all Rx buffers 2706 * @sp: device private variable. 2707 * Description: 2708 * This function will free all Rx buffers allocated by host. 2709 * Return Value: 2710 * NONE. 2711 */ 2712 2713 static void free_rx_buffers(struct s2io_nic *sp) 2714 { 2715 struct net_device *dev = sp->dev; 2716 int i, blk = 0, buf_cnt = 0; 2717 struct config_param *config = &sp->config; 2718 struct mac_info *mac_control = &sp->mac_control; 2719 2720 for (i = 0; i < config->rx_ring_num; i++) { 2721 struct ring_info *ring = &mac_control->rings[i]; 2722 2723 for (blk = 0; blk < rx_ring_sz[i]; blk++) 2724 free_rxd_blk(sp, i, blk); 2725 2726 ring->rx_curr_put_info.block_index = 0; 2727 ring->rx_curr_get_info.block_index = 0; 2728 ring->rx_curr_put_info.offset = 0; 2729 ring->rx_curr_get_info.offset = 0; 2730 ring->rx_bufs_left = 0; 2731 DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n", 2732 dev->name, buf_cnt, i); 2733 } 2734 } 2735 2736 static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring) 2737 { 2738 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) { 2739 DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n", 2740 ring->dev->name); 2741 } 2742 return 0; 2743 } 2744 2745 /** 2746 * s2io_poll - Rx interrupt handler for NAPI support 2747 * @napi : pointer to the napi structure. 2748 * @budget : The number of packets that were budgeted to be processed 2749 * during one pass through the 'Poll" function. 2750 * Description: 2751 * Comes into picture only if NAPI support has been incorporated. It does 2752 * the same thing that rx_intr_handler does, but not in a interrupt context 2753 * also It will process only a given number of packets. 2754 * Return value: 2755 * 0 on success and 1 if there are No Rx packets to be processed. 2756 */ 2757 2758 static int s2io_poll_msix(struct napi_struct *napi, int budget) 2759 { 2760 struct ring_info *ring = container_of(napi, struct ring_info, napi); 2761 struct net_device *dev = ring->dev; 2762 int pkts_processed = 0; 2763 u8 __iomem *addr = NULL; 2764 u8 val8 = 0; 2765 struct s2io_nic *nic = netdev_priv(dev); 2766 struct XENA_dev_config __iomem *bar0 = nic->bar0; 2767 int budget_org = budget; 2768 2769 if (unlikely(!is_s2io_card_up(nic))) 2770 return 0; 2771 2772 pkts_processed = rx_intr_handler(ring, budget); 2773 s2io_chk_rx_buffers(nic, ring); 2774 2775 if (pkts_processed < budget_org) { 2776 napi_complete_done(napi, pkts_processed); 2777 /*Re Enable MSI-Rx Vector*/ 2778 addr = (u8 __iomem *)&bar0->xmsi_mask_reg; 2779 addr += 7 - ring->ring_no; 2780 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf; 2781 writeb(val8, addr); 2782 val8 = readb(addr); 2783 } 2784 return pkts_processed; 2785 } 2786 2787 static int s2io_poll_inta(struct napi_struct *napi, int budget) 2788 { 2789 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); 2790 int pkts_processed = 0; 2791 int ring_pkts_processed, i; 2792 struct XENA_dev_config __iomem *bar0 = nic->bar0; 2793 int budget_org = budget; 2794 struct config_param *config = &nic->config; 2795 struct mac_info *mac_control = &nic->mac_control; 2796 2797 if (unlikely(!is_s2io_card_up(nic))) 2798 return 0; 2799 2800 for (i = 0; i < config->rx_ring_num; i++) { 2801 struct ring_info *ring = &mac_control->rings[i]; 2802 ring_pkts_processed = rx_intr_handler(ring, budget); 2803 s2io_chk_rx_buffers(nic, ring); 2804 pkts_processed += ring_pkts_processed; 2805 budget -= ring_pkts_processed; 2806 if (budget <= 0) 2807 break; 2808 } 2809 if (pkts_processed < budget_org) { 2810 napi_complete_done(napi, pkts_processed); 2811 /* Re enable the Rx interrupts for the ring */ 2812 writeq(0, &bar0->rx_traffic_mask); 2813 readl(&bar0->rx_traffic_mask); 2814 } 2815 return pkts_processed; 2816 } 2817 2818 #ifdef CONFIG_NET_POLL_CONTROLLER 2819 /** 2820 * s2io_netpoll - netpoll event handler entry point 2821 * @dev : pointer to the device structure. 2822 * Description: 2823 * This function will be called by upper layer to check for events on the 2824 * interface in situations where interrupts are disabled. It is used for 2825 * specific in-kernel networking tasks, such as remote consoles and kernel 2826 * debugging over the network (example netdump in RedHat). 2827 */ 2828 static void s2io_netpoll(struct net_device *dev) 2829 { 2830 struct s2io_nic *nic = netdev_priv(dev); 2831 const int irq = nic->pdev->irq; 2832 struct XENA_dev_config __iomem *bar0 = nic->bar0; 2833 u64 val64 = 0xFFFFFFFFFFFFFFFFULL; 2834 int i; 2835 struct config_param *config = &nic->config; 2836 struct mac_info *mac_control = &nic->mac_control; 2837 2838 if (pci_channel_offline(nic->pdev)) 2839 return; 2840 2841 disable_irq(irq); 2842 2843 writeq(val64, &bar0->rx_traffic_int); 2844 writeq(val64, &bar0->tx_traffic_int); 2845 2846 /* we need to free up the transmitted skbufs or else netpoll will 2847 * run out of skbs and will fail and eventually netpoll application such 2848 * as netdump will fail. 2849 */ 2850 for (i = 0; i < config->tx_fifo_num; i++) 2851 tx_intr_handler(&mac_control->fifos[i]); 2852 2853 /* check for received packet and indicate up to network */ 2854 for (i = 0; i < config->rx_ring_num; i++) { 2855 struct ring_info *ring = &mac_control->rings[i]; 2856 2857 rx_intr_handler(ring, 0); 2858 } 2859 2860 for (i = 0; i < config->rx_ring_num; i++) { 2861 struct ring_info *ring = &mac_control->rings[i]; 2862 2863 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) { 2864 DBG_PRINT(INFO_DBG, 2865 "%s: Out of memory in Rx Netpoll!!\n", 2866 dev->name); 2867 break; 2868 } 2869 } 2870 enable_irq(irq); 2871 } 2872 #endif 2873 2874 /** 2875 * rx_intr_handler - Rx interrupt handler 2876 * @ring_data: per ring structure. 2877 * @budget: budget for napi processing. 2878 * Description: 2879 * If the interrupt is because of a received frame or if the 2880 * receive ring contains fresh as yet un-processed frames,this function is 2881 * called. It picks out the RxD at which place the last Rx processing had 2882 * stopped and sends the skb to the OSM's Rx handler and then increments 2883 * the offset. 2884 * Return Value: 2885 * No. of napi packets processed. 2886 */ 2887 static int rx_intr_handler(struct ring_info *ring_data, int budget) 2888 { 2889 int get_block, put_block; 2890 struct rx_curr_get_info get_info, put_info; 2891 struct RxD_t *rxdp; 2892 struct sk_buff *skb; 2893 int pkt_cnt = 0, napi_pkts = 0; 2894 int i; 2895 struct RxD1 *rxdp1; 2896 struct RxD3 *rxdp3; 2897 2898 if (budget <= 0) 2899 return napi_pkts; 2900 2901 get_info = ring_data->rx_curr_get_info; 2902 get_block = get_info.block_index; 2903 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info)); 2904 put_block = put_info.block_index; 2905 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr; 2906 2907 while (RXD_IS_UP2DT(rxdp)) { 2908 /* 2909 * If your are next to put index then it's 2910 * FIFO full condition 2911 */ 2912 if ((get_block == put_block) && 2913 (get_info.offset + 1) == put_info.offset) { 2914 DBG_PRINT(INTR_DBG, "%s: Ring Full\n", 2915 ring_data->dev->name); 2916 break; 2917 } 2918 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control); 2919 if (skb == NULL) { 2920 DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n", 2921 ring_data->dev->name); 2922 return 0; 2923 } 2924 if (ring_data->rxd_mode == RXD_MODE_1) { 2925 rxdp1 = (struct RxD1 *)rxdp; 2926 dma_unmap_single(&ring_data->pdev->dev, 2927 (dma_addr_t)rxdp1->Buffer0_ptr, 2928 ring_data->mtu + 2929 HEADER_ETHERNET_II_802_3_SIZE + 2930 HEADER_802_2_SIZE + 2931 HEADER_SNAP_SIZE, 2932 DMA_FROM_DEVICE); 2933 } else if (ring_data->rxd_mode == RXD_MODE_3B) { 2934 rxdp3 = (struct RxD3 *)rxdp; 2935 dma_sync_single_for_cpu(&ring_data->pdev->dev, 2936 (dma_addr_t)rxdp3->Buffer0_ptr, 2937 BUF0_LEN, DMA_FROM_DEVICE); 2938 dma_unmap_single(&ring_data->pdev->dev, 2939 (dma_addr_t)rxdp3->Buffer2_ptr, 2940 ring_data->mtu + 4, DMA_FROM_DEVICE); 2941 } 2942 prefetch(skb->data); 2943 rx_osm_handler(ring_data, rxdp); 2944 get_info.offset++; 2945 ring_data->rx_curr_get_info.offset = get_info.offset; 2946 rxdp = ring_data->rx_blocks[get_block]. 2947 rxds[get_info.offset].virt_addr; 2948 if (get_info.offset == rxd_count[ring_data->rxd_mode]) { 2949 get_info.offset = 0; 2950 ring_data->rx_curr_get_info.offset = get_info.offset; 2951 get_block++; 2952 if (get_block == ring_data->block_count) 2953 get_block = 0; 2954 ring_data->rx_curr_get_info.block_index = get_block; 2955 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 2956 } 2957 2958 if (ring_data->nic->config.napi) { 2959 budget--; 2960 napi_pkts++; 2961 if (!budget) 2962 break; 2963 } 2964 pkt_cnt++; 2965 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts)) 2966 break; 2967 } 2968 if (ring_data->lro) { 2969 /* Clear all LRO sessions before exiting */ 2970 for (i = 0; i < MAX_LRO_SESSIONS; i++) { 2971 struct lro *lro = &ring_data->lro0_n[i]; 2972 if (lro->in_use) { 2973 update_L3L4_header(ring_data->nic, lro); 2974 queue_rx_frame(lro->parent, lro->vlan_tag); 2975 clear_lro_session(lro); 2976 } 2977 } 2978 } 2979 return napi_pkts; 2980 } 2981 2982 /** 2983 * tx_intr_handler - Transmit interrupt handler 2984 * @fifo_data : fifo data pointer 2985 * Description: 2986 * If an interrupt was raised to indicate DMA complete of the 2987 * Tx packet, this function is called. It identifies the last TxD 2988 * whose buffer was freed and frees all skbs whose data have already 2989 * DMA'ed into the NICs internal memory. 2990 * Return Value: 2991 * NONE 2992 */ 2993 2994 static void tx_intr_handler(struct fifo_info *fifo_data) 2995 { 2996 struct s2io_nic *nic = fifo_data->nic; 2997 struct tx_curr_get_info get_info, put_info; 2998 struct sk_buff *skb = NULL; 2999 struct TxD *txdlp; 3000 int pkt_cnt = 0; 3001 unsigned long flags = 0; 3002 u8 err_mask; 3003 struct stat_block *stats = nic->mac_control.stats_info; 3004 struct swStat *swstats = &stats->sw_stat; 3005 3006 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags)) 3007 return; 3008 3009 get_info = fifo_data->tx_curr_get_info; 3010 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info)); 3011 txdlp = fifo_data->list_info[get_info.offset].list_virt_addr; 3012 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) && 3013 (get_info.offset != put_info.offset) && 3014 (txdlp->Host_Control)) { 3015 /* Check for TxD errors */ 3016 if (txdlp->Control_1 & TXD_T_CODE) { 3017 unsigned long long err; 3018 err = txdlp->Control_1 & TXD_T_CODE; 3019 if (err & 0x1) { 3020 swstats->parity_err_cnt++; 3021 } 3022 3023 /* update t_code statistics */ 3024 err_mask = err >> 48; 3025 switch (err_mask) { 3026 case 2: 3027 swstats->tx_buf_abort_cnt++; 3028 break; 3029 3030 case 3: 3031 swstats->tx_desc_abort_cnt++; 3032 break; 3033 3034 case 7: 3035 swstats->tx_parity_err_cnt++; 3036 break; 3037 3038 case 10: 3039 swstats->tx_link_loss_cnt++; 3040 break; 3041 3042 case 15: 3043 swstats->tx_list_proc_err_cnt++; 3044 break; 3045 } 3046 } 3047 3048 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset); 3049 if (skb == NULL) { 3050 spin_unlock_irqrestore(&fifo_data->tx_lock, flags); 3051 DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n", 3052 __func__); 3053 return; 3054 } 3055 pkt_cnt++; 3056 3057 /* Updating the statistics block */ 3058 swstats->mem_freed += skb->truesize; 3059 dev_consume_skb_irq(skb); 3060 3061 get_info.offset++; 3062 if (get_info.offset == get_info.fifo_len + 1) 3063 get_info.offset = 0; 3064 txdlp = fifo_data->list_info[get_info.offset].list_virt_addr; 3065 fifo_data->tx_curr_get_info.offset = get_info.offset; 3066 } 3067 3068 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq); 3069 3070 spin_unlock_irqrestore(&fifo_data->tx_lock, flags); 3071 } 3072 3073 /** 3074 * s2io_mdio_write - Function to write in to MDIO registers 3075 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS) 3076 * @addr : address value 3077 * @value : data value 3078 * @dev : pointer to net_device structure 3079 * Description: 3080 * This function is used to write values to the MDIO registers 3081 * NONE 3082 */ 3083 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, 3084 struct net_device *dev) 3085 { 3086 u64 val64; 3087 struct s2io_nic *sp = netdev_priv(dev); 3088 struct XENA_dev_config __iomem *bar0 = sp->bar0; 3089 3090 /* address transaction */ 3091 val64 = MDIO_MMD_INDX_ADDR(addr) | 3092 MDIO_MMD_DEV_ADDR(mmd_type) | 3093 MDIO_MMS_PRT_ADDR(0x0); 3094 writeq(val64, &bar0->mdio_control); 3095 val64 = val64 | MDIO_CTRL_START_TRANS(0xE); 3096 writeq(val64, &bar0->mdio_control); 3097 udelay(100); 3098 3099 /* Data transaction */ 3100 val64 = MDIO_MMD_INDX_ADDR(addr) | 3101 MDIO_MMD_DEV_ADDR(mmd_type) | 3102 MDIO_MMS_PRT_ADDR(0x0) | 3103 MDIO_MDIO_DATA(value) | 3104 MDIO_OP(MDIO_OP_WRITE_TRANS); 3105 writeq(val64, &bar0->mdio_control); 3106 val64 = val64 | MDIO_CTRL_START_TRANS(0xE); 3107 writeq(val64, &bar0->mdio_control); 3108 udelay(100); 3109 3110 val64 = MDIO_MMD_INDX_ADDR(addr) | 3111 MDIO_MMD_DEV_ADDR(mmd_type) | 3112 MDIO_MMS_PRT_ADDR(0x0) | 3113 MDIO_OP(MDIO_OP_READ_TRANS); 3114 writeq(val64, &bar0->mdio_control); 3115 val64 = val64 | MDIO_CTRL_START_TRANS(0xE); 3116 writeq(val64, &bar0->mdio_control); 3117 udelay(100); 3118 } 3119 3120 /** 3121 * s2io_mdio_read - Function to write in to MDIO registers 3122 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS) 3123 * @addr : address value 3124 * @dev : pointer to net_device structure 3125 * Description: 3126 * This function is used to read values to the MDIO registers 3127 * NONE 3128 */ 3129 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev) 3130 { 3131 u64 val64 = 0x0; 3132 u64 rval64 = 0x0; 3133 struct s2io_nic *sp = netdev_priv(dev); 3134 struct XENA_dev_config __iomem *bar0 = sp->bar0; 3135 3136 /* address transaction */ 3137 val64 = val64 | (MDIO_MMD_INDX_ADDR(addr) 3138 | MDIO_MMD_DEV_ADDR(mmd_type) 3139 | MDIO_MMS_PRT_ADDR(0x0)); 3140 writeq(val64, &bar0->mdio_control); 3141 val64 = val64 | MDIO_CTRL_START_TRANS(0xE); 3142 writeq(val64, &bar0->mdio_control); 3143 udelay(100); 3144 3145 /* Data transaction */ 3146 val64 = MDIO_MMD_INDX_ADDR(addr) | 3147 MDIO_MMD_DEV_ADDR(mmd_type) | 3148 MDIO_MMS_PRT_ADDR(0x0) | 3149 MDIO_OP(MDIO_OP_READ_TRANS); 3150 writeq(val64, &bar0->mdio_control); 3151 val64 = val64 | MDIO_CTRL_START_TRANS(0xE); 3152 writeq(val64, &bar0->mdio_control); 3153 udelay(100); 3154 3155 /* Read the value from regs */ 3156 rval64 = readq(&bar0->mdio_control); 3157 rval64 = rval64 & 0xFFFF0000; 3158 rval64 = rval64 >> 16; 3159 return rval64; 3160 } 3161 3162 /** 3163 * s2io_chk_xpak_counter - Function to check the status of the xpak counters 3164 * @counter : counter value to be updated 3165 * @regs_stat : registers status 3166 * @index : index 3167 * @flag : flag to indicate the status 3168 * @type : counter type 3169 * Description: 3170 * This function is to check the status of the xpak counters value 3171 * NONE 3172 */ 3173 3174 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, 3175 u16 flag, u16 type) 3176 { 3177 u64 mask = 0x3; 3178 u64 val64; 3179 int i; 3180 for (i = 0; i < index; i++) 3181 mask = mask << 0x2; 3182 3183 if (flag > 0) { 3184 *counter = *counter + 1; 3185 val64 = *regs_stat & mask; 3186 val64 = val64 >> (index * 0x2); 3187 val64 = val64 + 1; 3188 if (val64 == 3) { 3189 switch (type) { 3190 case 1: 3191 DBG_PRINT(ERR_DBG, 3192 "Take Xframe NIC out of service.\n"); 3193 DBG_PRINT(ERR_DBG, 3194 "Excessive temperatures may result in premature transceiver failure.\n"); 3195 break; 3196 case 2: 3197 DBG_PRINT(ERR_DBG, 3198 "Take Xframe NIC out of service.\n"); 3199 DBG_PRINT(ERR_DBG, 3200 "Excessive bias currents may indicate imminent laser diode failure.\n"); 3201 break; 3202 case 3: 3203 DBG_PRINT(ERR_DBG, 3204 "Take Xframe NIC out of service.\n"); 3205 DBG_PRINT(ERR_DBG, 3206 "Excessive laser output power may saturate far-end receiver.\n"); 3207 break; 3208 default: 3209 DBG_PRINT(ERR_DBG, 3210 "Incorrect XPAK Alarm type\n"); 3211 } 3212 val64 = 0x0; 3213 } 3214 val64 = val64 << (index * 0x2); 3215 *regs_stat = (*regs_stat & (~mask)) | (val64); 3216 3217 } else { 3218 *regs_stat = *regs_stat & (~mask); 3219 } 3220 } 3221 3222 /** 3223 * s2io_updt_xpak_counter - Function to update the xpak counters 3224 * @dev : pointer to net_device struct 3225 * Description: 3226 * This function is to upate the status of the xpak counters value 3227 * NONE 3228 */ 3229 static void s2io_updt_xpak_counter(struct net_device *dev) 3230 { 3231 u16 flag = 0x0; 3232 u16 type = 0x0; 3233 u16 val16 = 0x0; 3234 u64 val64 = 0x0; 3235 u64 addr = 0x0; 3236 3237 struct s2io_nic *sp = netdev_priv(dev); 3238 struct stat_block *stats = sp->mac_control.stats_info; 3239 struct xpakStat *xstats = &stats->xpak_stat; 3240 3241 /* Check the communication with the MDIO slave */ 3242 addr = MDIO_CTRL1; 3243 val64 = 0x0; 3244 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev); 3245 if ((val64 == 0xFFFF) || (val64 == 0x0000)) { 3246 DBG_PRINT(ERR_DBG, 3247 "ERR: MDIO slave access failed - Returned %llx\n", 3248 (unsigned long long)val64); 3249 return; 3250 } 3251 3252 /* Check for the expected value of control reg 1 */ 3253 if (val64 != MDIO_CTRL1_SPEED10G) { 3254 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - " 3255 "Returned: %llx- Expected: 0x%x\n", 3256 (unsigned long long)val64, MDIO_CTRL1_SPEED10G); 3257 return; 3258 } 3259 3260 /* Loading the DOM register to MDIO register */ 3261 addr = 0xA100; 3262 s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev); 3263 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev); 3264 3265 /* Reading the Alarm flags */ 3266 addr = 0xA070; 3267 val64 = 0x0; 3268 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev); 3269 3270 flag = CHECKBIT(val64, 0x7); 3271 type = 1; 3272 s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high, 3273 &xstats->xpak_regs_stat, 3274 0x0, flag, type); 3275 3276 if (CHECKBIT(val64, 0x6)) 3277 xstats->alarm_transceiver_temp_low++; 3278 3279 flag = CHECKBIT(val64, 0x3); 3280 type = 2; 3281 s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high, 3282 &xstats->xpak_regs_stat, 3283 0x2, flag, type); 3284 3285 if (CHECKBIT(val64, 0x2)) 3286 xstats->alarm_laser_bias_current_low++; 3287 3288 flag = CHECKBIT(val64, 0x1); 3289 type = 3; 3290 s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high, 3291 &xstats->xpak_regs_stat, 3292 0x4, flag, type); 3293 3294 if (CHECKBIT(val64, 0x0)) 3295 xstats->alarm_laser_output_power_low++; 3296 3297 /* Reading the Warning flags */ 3298 addr = 0xA074; 3299 val64 = 0x0; 3300 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev); 3301 3302 if (CHECKBIT(val64, 0x7)) 3303 xstats->warn_transceiver_temp_high++; 3304 3305 if (CHECKBIT(val64, 0x6)) 3306 xstats->warn_transceiver_temp_low++; 3307 3308 if (CHECKBIT(val64, 0x3)) 3309 xstats->warn_laser_bias_current_high++; 3310 3311 if (CHECKBIT(val64, 0x2)) 3312 xstats->warn_laser_bias_current_low++; 3313 3314 if (CHECKBIT(val64, 0x1)) 3315 xstats->warn_laser_output_power_high++; 3316 3317 if (CHECKBIT(val64, 0x0)) 3318 xstats->warn_laser_output_power_low++; 3319 } 3320 3321 /** 3322 * wait_for_cmd_complete - waits for a command to complete. 3323 * @addr: address 3324 * @busy_bit: bit to check for busy 3325 * @bit_state: state to check 3326 * Description: Function that waits for a command to Write into RMAC 3327 * ADDR DATA registers to be completed and returns either success or 3328 * error depending on whether the command was complete or not. 3329 * Return value: 3330 * SUCCESS on success and FAILURE on failure. 3331 */ 3332 3333 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit, 3334 int bit_state, bool may_sleep) 3335 { 3336 int ret = FAILURE, cnt = 0, delay = 1; 3337 u64 val64; 3338 3339 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET)) 3340 return FAILURE; 3341 3342 do { 3343 val64 = readq(addr); 3344 if (bit_state == S2IO_BIT_RESET) { 3345 if (!(val64 & busy_bit)) { 3346 ret = SUCCESS; 3347 break; 3348 } 3349 } else { 3350 if (val64 & busy_bit) { 3351 ret = SUCCESS; 3352 break; 3353 } 3354 } 3355 3356 if (!may_sleep) 3357 mdelay(delay); 3358 else 3359 msleep(delay); 3360 3361 if (++cnt >= 10) 3362 delay = 50; 3363 } while (cnt < 20); 3364 return ret; 3365 } 3366 /** 3367 * check_pci_device_id - Checks if the device id is supported 3368 * @id : device id 3369 * Description: Function to check if the pci device id is supported by driver. 3370 * Return value: Actual device id if supported else PCI_ANY_ID 3371 */ 3372 static u16 check_pci_device_id(u16 id) 3373 { 3374 switch (id) { 3375 case PCI_DEVICE_ID_HERC_WIN: 3376 case PCI_DEVICE_ID_HERC_UNI: 3377 return XFRAME_II_DEVICE; 3378 case PCI_DEVICE_ID_S2IO_UNI: 3379 case PCI_DEVICE_ID_S2IO_WIN: 3380 return XFRAME_I_DEVICE; 3381 default: 3382 return PCI_ANY_ID; 3383 } 3384 } 3385 3386 /** 3387 * s2io_reset - Resets the card. 3388 * @sp : private member of the device structure. 3389 * Description: Function to Reset the card. This function then also 3390 * restores the previously saved PCI configuration space registers as 3391 * the card reset also resets the configuration space. 3392 * Return value: 3393 * void. 3394 */ 3395 3396 static void s2io_reset(struct s2io_nic *sp) 3397 { 3398 struct XENA_dev_config __iomem *bar0 = sp->bar0; 3399 u64 val64; 3400 u16 subid, pci_cmd; 3401 int i; 3402 u16 val16; 3403 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt; 3404 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt; 3405 struct stat_block *stats; 3406 struct swStat *swstats; 3407 3408 DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n", 3409 __func__, pci_name(sp->pdev)); 3410 3411 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */ 3412 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); 3413 3414 val64 = SW_RESET_ALL; 3415 writeq(val64, &bar0->sw_reset); 3416 if (strstr(sp->product_name, "CX4")) 3417 msleep(750); 3418 msleep(250); 3419 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) { 3420 3421 /* Restore the PCI state saved during initialization. */ 3422 pci_restore_state(sp->pdev); 3423 pci_save_state(sp->pdev); 3424 pci_read_config_word(sp->pdev, 0x2, &val16); 3425 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID) 3426 break; 3427 msleep(200); 3428 } 3429 3430 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) 3431 DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__); 3432 3433 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd); 3434 3435 s2io_init_pci(sp); 3436 3437 /* Set swapper to enable I/O register access */ 3438 s2io_set_swapper(sp); 3439 3440 /* restore mac_addr entries */ 3441 do_s2io_restore_unicast_mc(sp); 3442 3443 /* Restore the MSIX table entries from local variables */ 3444 restore_xmsi_data(sp); 3445 3446 /* Clear certain PCI/PCI-X fields after reset */ 3447 if (sp->device_type == XFRAME_II_DEVICE) { 3448 /* Clear "detected parity error" bit */ 3449 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); 3450 3451 /* Clearing PCIX Ecc status register */ 3452 pci_write_config_dword(sp->pdev, 0x68, 0x7C); 3453 3454 /* Clearing PCI_STATUS error reflected here */ 3455 writeq(s2BIT(62), &bar0->txpic_int_reg); 3456 } 3457 3458 /* Reset device statistics maintained by OS */ 3459 memset(&sp->stats, 0, sizeof(struct net_device_stats)); 3460 3461 stats = sp->mac_control.stats_info; 3462 swstats = &stats->sw_stat; 3463 3464 /* save link up/down time/cnt, reset/memory/watchdog cnt */ 3465 up_cnt = swstats->link_up_cnt; 3466 down_cnt = swstats->link_down_cnt; 3467 up_time = swstats->link_up_time; 3468 down_time = swstats->link_down_time; 3469 reset_cnt = swstats->soft_reset_cnt; 3470 mem_alloc_cnt = swstats->mem_allocated; 3471 mem_free_cnt = swstats->mem_freed; 3472 watchdog_cnt = swstats->watchdog_timer_cnt; 3473 3474 memset(stats, 0, sizeof(struct stat_block)); 3475 3476 /* restore link up/down time/cnt, reset/memory/watchdog cnt */ 3477 swstats->link_up_cnt = up_cnt; 3478 swstats->link_down_cnt = down_cnt; 3479 swstats->link_up_time = up_time; 3480 swstats->link_down_time = down_time; 3481 swstats->soft_reset_cnt = reset_cnt; 3482 swstats->mem_allocated = mem_alloc_cnt; 3483 swstats->mem_freed = mem_free_cnt; 3484 swstats->watchdog_timer_cnt = watchdog_cnt; 3485 3486 /* SXE-002: Configure link and activity LED to turn it off */ 3487 subid = sp->pdev->subsystem_device; 3488 if (((subid & 0xFF) >= 0x07) && 3489 (sp->device_type == XFRAME_I_DEVICE)) { 3490 val64 = readq(&bar0->gpio_control); 3491 val64 |= 0x0000800000000000ULL; 3492 writeq(val64, &bar0->gpio_control); 3493 val64 = 0x0411040400000000ULL; 3494 writeq(val64, (void __iomem *)bar0 + 0x2700); 3495 } 3496 3497 /* 3498 * Clear spurious ECC interrupts that would have occurred on 3499 * XFRAME II cards after reset. 3500 */ 3501 if (sp->device_type == XFRAME_II_DEVICE) { 3502 val64 = readq(&bar0->pcc_err_reg); 3503 writeq(val64, &bar0->pcc_err_reg); 3504 } 3505 3506 sp->device_enabled_once = false; 3507 } 3508 3509 /** 3510 * s2io_set_swapper - to set the swapper controle on the card 3511 * @sp : private member of the device structure, 3512 * pointer to the s2io_nic structure. 3513 * Description: Function to set the swapper control on the card 3514 * correctly depending on the 'endianness' of the system. 3515 * Return value: 3516 * SUCCESS on success and FAILURE on failure. 3517 */ 3518 3519 static int s2io_set_swapper(struct s2io_nic *sp) 3520 { 3521 struct net_device *dev = sp->dev; 3522 struct XENA_dev_config __iomem *bar0 = sp->bar0; 3523 u64 val64, valt, valr; 3524 3525 /* 3526 * Set proper endian settings and verify the same by reading 3527 * the PIF Feed-back register. 3528 */ 3529 3530 val64 = readq(&bar0->pif_rd_swapper_fb); 3531 if (val64 != 0x0123456789ABCDEFULL) { 3532 int i = 0; 3533 static const u64 value[] = { 3534 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */ 3535 0x8100008181000081ULL, /* FE=1, SE=0 */ 3536 0x4200004242000042ULL, /* FE=0, SE=1 */ 3537 0 /* FE=0, SE=0 */ 3538 }; 3539 3540 while (i < 4) { 3541 writeq(value[i], &bar0->swapper_ctrl); 3542 val64 = readq(&bar0->pif_rd_swapper_fb); 3543 if (val64 == 0x0123456789ABCDEFULL) 3544 break; 3545 i++; 3546 } 3547 if (i == 4) { 3548 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, " 3549 "feedback read %llx\n", 3550 dev->name, (unsigned long long)val64); 3551 return FAILURE; 3552 } 3553 valr = value[i]; 3554 } else { 3555 valr = readq(&bar0->swapper_ctrl); 3556 } 3557 3558 valt = 0x0123456789ABCDEFULL; 3559 writeq(valt, &bar0->xmsi_address); 3560 val64 = readq(&bar0->xmsi_address); 3561 3562 if (val64 != valt) { 3563 int i = 0; 3564 static const u64 value[] = { 3565 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */ 3566 0x0081810000818100ULL, /* FE=1, SE=0 */ 3567 0x0042420000424200ULL, /* FE=0, SE=1 */ 3568 0 /* FE=0, SE=0 */ 3569 }; 3570 3571 while (i < 4) { 3572 writeq((value[i] | valr), &bar0->swapper_ctrl); 3573 writeq(valt, &bar0->xmsi_address); 3574 val64 = readq(&bar0->xmsi_address); 3575 if (val64 == valt) 3576 break; 3577 i++; 3578 } 3579 if (i == 4) { 3580 unsigned long long x = val64; 3581 DBG_PRINT(ERR_DBG, 3582 "Write failed, Xmsi_addr reads:0x%llx\n", x); 3583 return FAILURE; 3584 } 3585 } 3586 val64 = readq(&bar0->swapper_ctrl); 3587 val64 &= 0xFFFF000000000000ULL; 3588 3589 #ifdef __BIG_ENDIAN 3590 /* 3591 * The device by default set to a big endian format, so a 3592 * big endian driver need not set anything. 3593 */ 3594 val64 |= (SWAPPER_CTRL_TXP_FE | 3595 SWAPPER_CTRL_TXP_SE | 3596 SWAPPER_CTRL_TXD_R_FE | 3597 SWAPPER_CTRL_TXD_W_FE | 3598 SWAPPER_CTRL_TXF_R_FE | 3599 SWAPPER_CTRL_RXD_R_FE | 3600 SWAPPER_CTRL_RXD_W_FE | 3601 SWAPPER_CTRL_RXF_W_FE | 3602 SWAPPER_CTRL_XMSI_FE | 3603 SWAPPER_CTRL_STATS_FE | 3604 SWAPPER_CTRL_STATS_SE); 3605 if (sp->config.intr_type == INTA) 3606 val64 |= SWAPPER_CTRL_XMSI_SE; 3607 writeq(val64, &bar0->swapper_ctrl); 3608 #else 3609 /* 3610 * Initially we enable all bits to make it accessible by the 3611 * driver, then we selectively enable only those bits that 3612 * we want to set. 3613 */ 3614 val64 |= (SWAPPER_CTRL_TXP_FE | 3615 SWAPPER_CTRL_TXP_SE | 3616 SWAPPER_CTRL_TXD_R_FE | 3617 SWAPPER_CTRL_TXD_R_SE | 3618 SWAPPER_CTRL_TXD_W_FE | 3619 SWAPPER_CTRL_TXD_W_SE | 3620 SWAPPER_CTRL_TXF_R_FE | 3621 SWAPPER_CTRL_RXD_R_FE | 3622 SWAPPER_CTRL_RXD_R_SE | 3623 SWAPPER_CTRL_RXD_W_FE | 3624 SWAPPER_CTRL_RXD_W_SE | 3625 SWAPPER_CTRL_RXF_W_FE | 3626 SWAPPER_CTRL_XMSI_FE | 3627 SWAPPER_CTRL_STATS_FE | 3628 SWAPPER_CTRL_STATS_SE); 3629 if (sp->config.intr_type == INTA) 3630 val64 |= SWAPPER_CTRL_XMSI_SE; 3631 writeq(val64, &bar0->swapper_ctrl); 3632 #endif 3633 val64 = readq(&bar0->swapper_ctrl); 3634 3635 /* 3636 * Verifying if endian settings are accurate by reading a 3637 * feedback register. 3638 */ 3639 val64 = readq(&bar0->pif_rd_swapper_fb); 3640 if (val64 != 0x0123456789ABCDEFULL) { 3641 /* Endian settings are incorrect, calls for another dekko. */ 3642 DBG_PRINT(ERR_DBG, 3643 "%s: Endian settings are wrong, feedback read %llx\n", 3644 dev->name, (unsigned long long)val64); 3645 return FAILURE; 3646 } 3647 3648 return SUCCESS; 3649 } 3650 3651 static int wait_for_msix_trans(struct s2io_nic *nic, int i) 3652 { 3653 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3654 u64 val64; 3655 int ret = 0, cnt = 0; 3656 3657 do { 3658 val64 = readq(&bar0->xmsi_access); 3659 if (!(val64 & s2BIT(15))) 3660 break; 3661 mdelay(1); 3662 cnt++; 3663 } while (cnt < 5); 3664 if (cnt == 5) { 3665 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i); 3666 ret = 1; 3667 } 3668 3669 return ret; 3670 } 3671 3672 static void restore_xmsi_data(struct s2io_nic *nic) 3673 { 3674 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3675 u64 val64; 3676 int i, msix_index; 3677 3678 if (nic->device_type == XFRAME_I_DEVICE) 3679 return; 3680 3681 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) { 3682 msix_index = (i) ? ((i-1) * 8 + 1) : 0; 3683 writeq(nic->msix_info[i].addr, &bar0->xmsi_address); 3684 writeq(nic->msix_info[i].data, &bar0->xmsi_data); 3685 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6)); 3686 writeq(val64, &bar0->xmsi_access); 3687 if (wait_for_msix_trans(nic, msix_index)) 3688 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n", 3689 __func__, msix_index); 3690 } 3691 } 3692 3693 static void store_xmsi_data(struct s2io_nic *nic) 3694 { 3695 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3696 u64 val64, addr, data; 3697 int i, msix_index; 3698 3699 if (nic->device_type == XFRAME_I_DEVICE) 3700 return; 3701 3702 /* Store and display */ 3703 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) { 3704 msix_index = (i) ? ((i-1) * 8 + 1) : 0; 3705 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6)); 3706 writeq(val64, &bar0->xmsi_access); 3707 if (wait_for_msix_trans(nic, msix_index)) { 3708 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n", 3709 __func__, msix_index); 3710 continue; 3711 } 3712 addr = readq(&bar0->xmsi_address); 3713 data = readq(&bar0->xmsi_data); 3714 if (addr && data) { 3715 nic->msix_info[i].addr = addr; 3716 nic->msix_info[i].data = data; 3717 } 3718 } 3719 } 3720 3721 static int s2io_enable_msi_x(struct s2io_nic *nic) 3722 { 3723 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3724 u64 rx_mat; 3725 u16 msi_control; /* Temp variable */ 3726 int ret, i, j, msix_indx = 1; 3727 int size; 3728 struct stat_block *stats = nic->mac_control.stats_info; 3729 struct swStat *swstats = &stats->sw_stat; 3730 3731 size = nic->num_entries * sizeof(struct msix_entry); 3732 nic->entries = kzalloc(size, GFP_KERNEL); 3733 if (!nic->entries) { 3734 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", 3735 __func__); 3736 swstats->mem_alloc_fail_cnt++; 3737 return -ENOMEM; 3738 } 3739 swstats->mem_allocated += size; 3740 3741 size = nic->num_entries * sizeof(struct s2io_msix_entry); 3742 nic->s2io_entries = kzalloc(size, GFP_KERNEL); 3743 if (!nic->s2io_entries) { 3744 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", 3745 __func__); 3746 swstats->mem_alloc_fail_cnt++; 3747 kfree(nic->entries); 3748 swstats->mem_freed 3749 += (nic->num_entries * sizeof(struct msix_entry)); 3750 return -ENOMEM; 3751 } 3752 swstats->mem_allocated += size; 3753 3754 nic->entries[0].entry = 0; 3755 nic->s2io_entries[0].entry = 0; 3756 nic->s2io_entries[0].in_use = MSIX_FLG; 3757 nic->s2io_entries[0].type = MSIX_ALARM_TYPE; 3758 nic->s2io_entries[0].arg = &nic->mac_control.fifos; 3759 3760 for (i = 1; i < nic->num_entries; i++) { 3761 nic->entries[i].entry = ((i - 1) * 8) + 1; 3762 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1; 3763 nic->s2io_entries[i].arg = NULL; 3764 nic->s2io_entries[i].in_use = 0; 3765 } 3766 3767 rx_mat = readq(&bar0->rx_mat); 3768 for (j = 0; j < nic->config.rx_ring_num; j++) { 3769 rx_mat |= RX_MAT_SET(j, msix_indx); 3770 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j]; 3771 nic->s2io_entries[j+1].type = MSIX_RING_TYPE; 3772 nic->s2io_entries[j+1].in_use = MSIX_FLG; 3773 msix_indx += 8; 3774 } 3775 writeq(rx_mat, &bar0->rx_mat); 3776 readq(&bar0->rx_mat); 3777 3778 ret = pci_enable_msix_range(nic->pdev, nic->entries, 3779 nic->num_entries, nic->num_entries); 3780 /* We fail init if error or we get less vectors than min required */ 3781 if (ret < 0) { 3782 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n"); 3783 kfree(nic->entries); 3784 swstats->mem_freed += nic->num_entries * 3785 sizeof(struct msix_entry); 3786 kfree(nic->s2io_entries); 3787 swstats->mem_freed += nic->num_entries * 3788 sizeof(struct s2io_msix_entry); 3789 nic->entries = NULL; 3790 nic->s2io_entries = NULL; 3791 return -ENOMEM; 3792 } 3793 3794 /* 3795 * To enable MSI-X, MSI also needs to be enabled, due to a bug 3796 * in the herc NIC. (Temp change, needs to be removed later) 3797 */ 3798 pci_read_config_word(nic->pdev, 0x42, &msi_control); 3799 msi_control |= 0x1; /* Enable MSI */ 3800 pci_write_config_word(nic->pdev, 0x42, msi_control); 3801 3802 return 0; 3803 } 3804 3805 /* Handle software interrupt used during MSI(X) test */ 3806 static irqreturn_t s2io_test_intr(int irq, void *dev_id) 3807 { 3808 struct s2io_nic *sp = dev_id; 3809 3810 sp->msi_detected = 1; 3811 wake_up(&sp->msi_wait); 3812 3813 return IRQ_HANDLED; 3814 } 3815 3816 /* Test interrupt path by forcing a a software IRQ */ 3817 static int s2io_test_msi(struct s2io_nic *sp) 3818 { 3819 struct pci_dev *pdev = sp->pdev; 3820 struct XENA_dev_config __iomem *bar0 = sp->bar0; 3821 int err; 3822 u64 val64, saved64; 3823 3824 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0, 3825 sp->name, sp); 3826 if (err) { 3827 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n", 3828 sp->dev->name, pci_name(pdev), pdev->irq); 3829 return err; 3830 } 3831 3832 init_waitqueue_head(&sp->msi_wait); 3833 sp->msi_detected = 0; 3834 3835 saved64 = val64 = readq(&bar0->scheduled_int_ctrl); 3836 val64 |= SCHED_INT_CTRL_ONE_SHOT; 3837 val64 |= SCHED_INT_CTRL_TIMER_EN; 3838 val64 |= SCHED_INT_CTRL_INT2MSI(1); 3839 writeq(val64, &bar0->scheduled_int_ctrl); 3840 3841 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10); 3842 3843 if (!sp->msi_detected) { 3844 /* MSI(X) test failed, go back to INTx mode */ 3845 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated " 3846 "using MSI(X) during test\n", 3847 sp->dev->name, pci_name(pdev)); 3848 3849 err = -EOPNOTSUPP; 3850 } 3851 3852 free_irq(sp->entries[1].vector, sp); 3853 3854 writeq(saved64, &bar0->scheduled_int_ctrl); 3855 3856 return err; 3857 } 3858 3859 static void remove_msix_isr(struct s2io_nic *sp) 3860 { 3861 int i; 3862 u16 msi_control; 3863 3864 for (i = 0; i < sp->num_entries; i++) { 3865 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) { 3866 int vector = sp->entries[i].vector; 3867 void *arg = sp->s2io_entries[i].arg; 3868 free_irq(vector, arg); 3869 } 3870 } 3871 3872 kfree(sp->entries); 3873 kfree(sp->s2io_entries); 3874 sp->entries = NULL; 3875 sp->s2io_entries = NULL; 3876 3877 pci_read_config_word(sp->pdev, 0x42, &msi_control); 3878 msi_control &= 0xFFFE; /* Disable MSI */ 3879 pci_write_config_word(sp->pdev, 0x42, msi_control); 3880 3881 pci_disable_msix(sp->pdev); 3882 } 3883 3884 static void remove_inta_isr(struct s2io_nic *sp) 3885 { 3886 free_irq(sp->pdev->irq, sp->dev); 3887 } 3888 3889 /* ********************************************************* * 3890 * Functions defined below concern the OS part of the driver * 3891 * ********************************************************* */ 3892 3893 /** 3894 * s2io_open - open entry point of the driver 3895 * @dev : pointer to the device structure. 3896 * Description: 3897 * This function is the open entry point of the driver. It mainly calls a 3898 * function to allocate Rx buffers and inserts them into the buffer 3899 * descriptors and then enables the Rx part of the NIC. 3900 * Return value: 3901 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3902 * file on failure. 3903 */ 3904 3905 static int s2io_open(struct net_device *dev) 3906 { 3907 struct s2io_nic *sp = netdev_priv(dev); 3908 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; 3909 int err = 0; 3910 3911 /* 3912 * Make sure you have link off by default every time 3913 * Nic is initialized 3914 */ 3915 netif_carrier_off(dev); 3916 sp->last_link_state = 0; 3917 3918 /* Initialize H/W and enable interrupts */ 3919 err = s2io_card_up(sp); 3920 if (err) { 3921 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", 3922 dev->name); 3923 goto hw_init_failed; 3924 } 3925 3926 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) { 3927 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n"); 3928 s2io_card_down(sp); 3929 err = -ENODEV; 3930 goto hw_init_failed; 3931 } 3932 s2io_start_all_tx_queue(sp); 3933 return 0; 3934 3935 hw_init_failed: 3936 if (sp->config.intr_type == MSI_X) { 3937 if (sp->entries) { 3938 kfree(sp->entries); 3939 swstats->mem_freed += sp->num_entries * 3940 sizeof(struct msix_entry); 3941 } 3942 if (sp->s2io_entries) { 3943 kfree(sp->s2io_entries); 3944 swstats->mem_freed += sp->num_entries * 3945 sizeof(struct s2io_msix_entry); 3946 } 3947 } 3948 return err; 3949 } 3950 3951 /** 3952 * s2io_close -close entry point of the driver 3953 * @dev : device pointer. 3954 * Description: 3955 * This is the stop entry point of the driver. It needs to undo exactly 3956 * whatever was done by the open entry point,thus it's usually referred to 3957 * as the close function.Among other things this function mainly stops the 3958 * Rx side of the NIC and frees all the Rx buffers in the Rx rings. 3959 * Return value: 3960 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3961 * file on failure. 3962 */ 3963 3964 static int s2io_close(struct net_device *dev) 3965 { 3966 struct s2io_nic *sp = netdev_priv(dev); 3967 struct config_param *config = &sp->config; 3968 u64 tmp64; 3969 int offset; 3970 3971 /* Return if the device is already closed * 3972 * Can happen when s2io_card_up failed in change_mtu * 3973 */ 3974 if (!is_s2io_card_up(sp)) 3975 return 0; 3976 3977 s2io_stop_all_tx_queue(sp); 3978 /* delete all populated mac entries */ 3979 for (offset = 1; offset < config->max_mc_addr; offset++) { 3980 tmp64 = do_s2io_read_unicast_mc(sp, offset); 3981 if (tmp64 != S2IO_DISABLE_MAC_ENTRY) 3982 do_s2io_delete_unicast_mc(sp, tmp64); 3983 } 3984 3985 s2io_card_down(sp); 3986 3987 return 0; 3988 } 3989 3990 /** 3991 * s2io_xmit - Tx entry point of te driver 3992 * @skb : the socket buffer containing the Tx data. 3993 * @dev : device pointer. 3994 * Description : 3995 * This function is the Tx entry point of the driver. S2IO NIC supports 3996 * certain protocol assist features on Tx side, namely CSO, S/G, LSO. 3997 * NOTE: when device can't queue the pkt,just the trans_start variable will 3998 * not be upadted. 3999 * Return value: 4000 * 0 on success & 1 on failure. 4001 */ 4002 4003 static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) 4004 { 4005 struct s2io_nic *sp = netdev_priv(dev); 4006 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off; 4007 register u64 val64; 4008 struct TxD *txdp; 4009 struct TxFIFO_element __iomem *tx_fifo; 4010 unsigned long flags = 0; 4011 u16 vlan_tag = 0; 4012 struct fifo_info *fifo = NULL; 4013 int offload_type; 4014 int enable_per_list_interrupt = 0; 4015 struct config_param *config = &sp->config; 4016 struct mac_info *mac_control = &sp->mac_control; 4017 struct stat_block *stats = mac_control->stats_info; 4018 struct swStat *swstats = &stats->sw_stat; 4019 4020 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name); 4021 4022 if (unlikely(skb->len <= 0)) { 4023 DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name); 4024 dev_kfree_skb_any(skb); 4025 return NETDEV_TX_OK; 4026 } 4027 4028 if (!is_s2io_card_up(sp)) { 4029 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n", 4030 dev->name); 4031 dev_kfree_skb_any(skb); 4032 return NETDEV_TX_OK; 4033 } 4034 4035 queue = 0; 4036 if (skb_vlan_tag_present(skb)) 4037 vlan_tag = skb_vlan_tag_get(skb); 4038 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) { 4039 if (skb->protocol == htons(ETH_P_IP)) { 4040 struct iphdr *ip; 4041 struct tcphdr *th; 4042 ip = ip_hdr(skb); 4043 4044 if (!ip_is_fragment(ip)) { 4045 th = (struct tcphdr *)(((unsigned char *)ip) + 4046 ip->ihl*4); 4047 4048 if (ip->protocol == IPPROTO_TCP) { 4049 queue_len = sp->total_tcp_fifos; 4050 queue = (ntohs(th->source) + 4051 ntohs(th->dest)) & 4052 sp->fifo_selector[queue_len - 1]; 4053 if (queue >= queue_len) 4054 queue = queue_len - 1; 4055 } else if (ip->protocol == IPPROTO_UDP) { 4056 queue_len = sp->total_udp_fifos; 4057 queue = (ntohs(th->source) + 4058 ntohs(th->dest)) & 4059 sp->fifo_selector[queue_len - 1]; 4060 if (queue >= queue_len) 4061 queue = queue_len - 1; 4062 queue += sp->udp_fifo_idx; 4063 if (skb->len > 1024) 4064 enable_per_list_interrupt = 1; 4065 } 4066 } 4067 } 4068 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING) 4069 /* get fifo number based on skb->priority value */ 4070 queue = config->fifo_mapping 4071 [skb->priority & (MAX_TX_FIFOS - 1)]; 4072 fifo = &mac_control->fifos[queue]; 4073 4074 spin_lock_irqsave(&fifo->tx_lock, flags); 4075 4076 if (sp->config.multiq) { 4077 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { 4078 spin_unlock_irqrestore(&fifo->tx_lock, flags); 4079 return NETDEV_TX_BUSY; 4080 } 4081 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) { 4082 if (netif_queue_stopped(dev)) { 4083 spin_unlock_irqrestore(&fifo->tx_lock, flags); 4084 return NETDEV_TX_BUSY; 4085 } 4086 } 4087 4088 put_off = (u16)fifo->tx_curr_put_info.offset; 4089 get_off = (u16)fifo->tx_curr_get_info.offset; 4090 txdp = fifo->list_info[put_off].list_virt_addr; 4091 4092 queue_len = fifo->tx_curr_put_info.fifo_len + 1; 4093 /* Avoid "put" pointer going beyond "get" pointer */ 4094 if (txdp->Host_Control || 4095 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) { 4096 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n"); 4097 s2io_stop_tx_queue(sp, fifo->fifo_no); 4098 dev_kfree_skb_any(skb); 4099 spin_unlock_irqrestore(&fifo->tx_lock, flags); 4100 return NETDEV_TX_OK; 4101 } 4102 4103 offload_type = s2io_offload_type(skb); 4104 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { 4105 txdp->Control_1 |= TXD_TCP_LSO_EN; 4106 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); 4107 } 4108 if (skb->ip_summed == CHECKSUM_PARTIAL) { 4109 txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN | 4110 TXD_TX_CKO_TCP_EN | 4111 TXD_TX_CKO_UDP_EN); 4112 } 4113 txdp->Control_1 |= TXD_GATHER_CODE_FIRST; 4114 txdp->Control_1 |= TXD_LIST_OWN_XENA; 4115 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no); 4116 if (enable_per_list_interrupt) 4117 if (put_off & (queue_len >> 5)) 4118 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST; 4119 if (vlan_tag) { 4120 txdp->Control_2 |= TXD_VLAN_ENABLE; 4121 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag); 4122 } 4123 4124 frg_len = skb_headlen(skb); 4125 txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data, 4126 frg_len, DMA_TO_DEVICE); 4127 if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer)) 4128 goto pci_map_failed; 4129 4130 txdp->Host_Control = (unsigned long)skb; 4131 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); 4132 4133 frg_cnt = skb_shinfo(skb)->nr_frags; 4134 /* For fragmented SKB. */ 4135 for (i = 0; i < frg_cnt; i++) { 4136 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4137 /* A '0' length fragment will be ignored */ 4138 if (!skb_frag_size(frag)) 4139 continue; 4140 txdp++; 4141 txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev, 4142 frag, 0, 4143 skb_frag_size(frag), 4144 DMA_TO_DEVICE); 4145 txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag)); 4146 } 4147 txdp->Control_1 |= TXD_GATHER_CODE_LAST; 4148 4149 tx_fifo = mac_control->tx_FIFO_start[queue]; 4150 val64 = fifo->list_info[put_off].list_phy_addr; 4151 writeq(val64, &tx_fifo->TxDL_Pointer); 4152 4153 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | 4154 TX_FIFO_LAST_LIST); 4155 if (offload_type) 4156 val64 |= TX_FIFO_SPECIAL_FUNC; 4157 4158 writeq(val64, &tx_fifo->List_Control); 4159 4160 put_off++; 4161 if (put_off == fifo->tx_curr_put_info.fifo_len + 1) 4162 put_off = 0; 4163 fifo->tx_curr_put_info.offset = put_off; 4164 4165 /* Avoid "put" pointer going beyond "get" pointer */ 4166 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) { 4167 swstats->fifo_full_cnt++; 4168 DBG_PRINT(TX_DBG, 4169 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n", 4170 put_off, get_off); 4171 s2io_stop_tx_queue(sp, fifo->fifo_no); 4172 } 4173 swstats->mem_allocated += skb->truesize; 4174 spin_unlock_irqrestore(&fifo->tx_lock, flags); 4175 4176 if (sp->config.intr_type == MSI_X) 4177 tx_intr_handler(fifo); 4178 4179 return NETDEV_TX_OK; 4180 4181 pci_map_failed: 4182 swstats->pci_map_fail_cnt++; 4183 s2io_stop_tx_queue(sp, fifo->fifo_no); 4184 swstats->mem_freed += skb->truesize; 4185 dev_kfree_skb_any(skb); 4186 spin_unlock_irqrestore(&fifo->tx_lock, flags); 4187 return NETDEV_TX_OK; 4188 } 4189 4190 static void 4191 s2io_alarm_handle(struct timer_list *t) 4192 { 4193 struct s2io_nic *sp = from_timer(sp, t, alarm_timer); 4194 struct net_device *dev = sp->dev; 4195 4196 s2io_handle_errors(dev); 4197 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 4198 } 4199 4200 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) 4201 { 4202 struct ring_info *ring = (struct ring_info *)dev_id; 4203 struct s2io_nic *sp = ring->nic; 4204 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4205 4206 if (unlikely(!is_s2io_card_up(sp))) 4207 return IRQ_HANDLED; 4208 4209 if (sp->config.napi) { 4210 u8 __iomem *addr = NULL; 4211 u8 val8 = 0; 4212 4213 addr = (u8 __iomem *)&bar0->xmsi_mask_reg; 4214 addr += (7 - ring->ring_no); 4215 val8 = (ring->ring_no == 0) ? 0x7f : 0xff; 4216 writeb(val8, addr); 4217 val8 = readb(addr); 4218 napi_schedule(&ring->napi); 4219 } else { 4220 rx_intr_handler(ring, 0); 4221 s2io_chk_rx_buffers(sp, ring); 4222 } 4223 4224 return IRQ_HANDLED; 4225 } 4226 4227 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) 4228 { 4229 int i; 4230 struct fifo_info *fifos = (struct fifo_info *)dev_id; 4231 struct s2io_nic *sp = fifos->nic; 4232 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4233 struct config_param *config = &sp->config; 4234 u64 reason; 4235 4236 if (unlikely(!is_s2io_card_up(sp))) 4237 return IRQ_NONE; 4238 4239 reason = readq(&bar0->general_int_status); 4240 if (unlikely(reason == S2IO_MINUS_ONE)) 4241 /* Nothing much can be done. Get out */ 4242 return IRQ_HANDLED; 4243 4244 if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) { 4245 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); 4246 4247 if (reason & GEN_INTR_TXPIC) 4248 s2io_txpic_intr_handle(sp); 4249 4250 if (reason & GEN_INTR_TXTRAFFIC) 4251 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); 4252 4253 for (i = 0; i < config->tx_fifo_num; i++) 4254 tx_intr_handler(&fifos[i]); 4255 4256 writeq(sp->general_int_mask, &bar0->general_int_mask); 4257 readl(&bar0->general_int_status); 4258 return IRQ_HANDLED; 4259 } 4260 /* The interrupt was not raised by us */ 4261 return IRQ_NONE; 4262 } 4263 4264 static void s2io_txpic_intr_handle(struct s2io_nic *sp) 4265 { 4266 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4267 u64 val64; 4268 4269 val64 = readq(&bar0->pic_int_status); 4270 if (val64 & PIC_INT_GPIO) { 4271 val64 = readq(&bar0->gpio_int_reg); 4272 if ((val64 & GPIO_INT_REG_LINK_DOWN) && 4273 (val64 & GPIO_INT_REG_LINK_UP)) { 4274 /* 4275 * This is unstable state so clear both up/down 4276 * interrupt and adapter to re-evaluate the link state. 4277 */ 4278 val64 |= GPIO_INT_REG_LINK_DOWN; 4279 val64 |= GPIO_INT_REG_LINK_UP; 4280 writeq(val64, &bar0->gpio_int_reg); 4281 val64 = readq(&bar0->gpio_int_mask); 4282 val64 &= ~(GPIO_INT_MASK_LINK_UP | 4283 GPIO_INT_MASK_LINK_DOWN); 4284 writeq(val64, &bar0->gpio_int_mask); 4285 } else if (val64 & GPIO_INT_REG_LINK_UP) { 4286 val64 = readq(&bar0->adapter_status); 4287 /* Enable Adapter */ 4288 val64 = readq(&bar0->adapter_control); 4289 val64 |= ADAPTER_CNTL_EN; 4290 writeq(val64, &bar0->adapter_control); 4291 val64 |= ADAPTER_LED_ON; 4292 writeq(val64, &bar0->adapter_control); 4293 if (!sp->device_enabled_once) 4294 sp->device_enabled_once = 1; 4295 4296 s2io_link(sp, LINK_UP); 4297 /* 4298 * unmask link down interrupt and mask link-up 4299 * intr 4300 */ 4301 val64 = readq(&bar0->gpio_int_mask); 4302 val64 &= ~GPIO_INT_MASK_LINK_DOWN; 4303 val64 |= GPIO_INT_MASK_LINK_UP; 4304 writeq(val64, &bar0->gpio_int_mask); 4305 4306 } else if (val64 & GPIO_INT_REG_LINK_DOWN) { 4307 val64 = readq(&bar0->adapter_status); 4308 s2io_link(sp, LINK_DOWN); 4309 /* Link is down so unmaks link up interrupt */ 4310 val64 = readq(&bar0->gpio_int_mask); 4311 val64 &= ~GPIO_INT_MASK_LINK_UP; 4312 val64 |= GPIO_INT_MASK_LINK_DOWN; 4313 writeq(val64, &bar0->gpio_int_mask); 4314 4315 /* turn off LED */ 4316 val64 = readq(&bar0->adapter_control); 4317 val64 = val64 & (~ADAPTER_LED_ON); 4318 writeq(val64, &bar0->adapter_control); 4319 } 4320 } 4321 val64 = readq(&bar0->gpio_int_mask); 4322 } 4323 4324 /** 4325 * do_s2io_chk_alarm_bit - Check for alarm and incrment the counter 4326 * @value: alarm bits 4327 * @addr: address value 4328 * @cnt: counter variable 4329 * Description: Check for alarm and increment the counter 4330 * Return Value: 4331 * 1 - if alarm bit set 4332 * 0 - if alarm bit is not set 4333 */ 4334 static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr, 4335 unsigned long long *cnt) 4336 { 4337 u64 val64; 4338 val64 = readq(addr); 4339 if (val64 & value) { 4340 writeq(val64, addr); 4341 (*cnt)++; 4342 return 1; 4343 } 4344 return 0; 4345 4346 } 4347 4348 /** 4349 * s2io_handle_errors - Xframe error indication handler 4350 * @dev_id: opaque handle to dev 4351 * Description: Handle alarms such as loss of link, single or 4352 * double ECC errors, critical and serious errors. 4353 * Return Value: 4354 * NONE 4355 */ 4356 static void s2io_handle_errors(void *dev_id) 4357 { 4358 struct net_device *dev = (struct net_device *)dev_id; 4359 struct s2io_nic *sp = netdev_priv(dev); 4360 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4361 u64 temp64 = 0, val64 = 0; 4362 int i = 0; 4363 4364 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat; 4365 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat; 4366 4367 if (!is_s2io_card_up(sp)) 4368 return; 4369 4370 if (pci_channel_offline(sp->pdev)) 4371 return; 4372 4373 memset(&sw_stat->ring_full_cnt, 0, 4374 sizeof(sw_stat->ring_full_cnt)); 4375 4376 /* Handling the XPAK counters update */ 4377 if (stats->xpak_timer_count < 72000) { 4378 /* waiting for an hour */ 4379 stats->xpak_timer_count++; 4380 } else { 4381 s2io_updt_xpak_counter(dev); 4382 /* reset the count to zero */ 4383 stats->xpak_timer_count = 0; 4384 } 4385 4386 /* Handling link status change error Intr */ 4387 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) { 4388 val64 = readq(&bar0->mac_rmac_err_reg); 4389 writeq(val64, &bar0->mac_rmac_err_reg); 4390 if (val64 & RMAC_LINK_STATE_CHANGE_INT) 4391 schedule_work(&sp->set_link_task); 4392 } 4393 4394 /* In case of a serious error, the device will be Reset. */ 4395 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source, 4396 &sw_stat->serious_err_cnt)) 4397 goto reset; 4398 4399 /* Check for data parity error */ 4400 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg, 4401 &sw_stat->parity_err_cnt)) 4402 goto reset; 4403 4404 /* Check for ring full counter */ 4405 if (sp->device_type == XFRAME_II_DEVICE) { 4406 val64 = readq(&bar0->ring_bump_counter1); 4407 for (i = 0; i < 4; i++) { 4408 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16)); 4409 temp64 >>= 64 - ((i+1)*16); 4410 sw_stat->ring_full_cnt[i] += temp64; 4411 } 4412 4413 val64 = readq(&bar0->ring_bump_counter2); 4414 for (i = 0; i < 4; i++) { 4415 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16)); 4416 temp64 >>= 64 - ((i+1)*16); 4417 sw_stat->ring_full_cnt[i+4] += temp64; 4418 } 4419 } 4420 4421 val64 = readq(&bar0->txdma_int_status); 4422 /*check for pfc_err*/ 4423 if (val64 & TXDMA_PFC_INT) { 4424 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM | 4425 PFC_MISC_0_ERR | PFC_MISC_1_ERR | 4426 PFC_PCIX_ERR, 4427 &bar0->pfc_err_reg, 4428 &sw_stat->pfc_err_cnt)) 4429 goto reset; 4430 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, 4431 &bar0->pfc_err_reg, 4432 &sw_stat->pfc_err_cnt); 4433 } 4434 4435 /*check for tda_err*/ 4436 if (val64 & TXDMA_TDA_INT) { 4437 if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | 4438 TDA_SM0_ERR_ALARM | 4439 TDA_SM1_ERR_ALARM, 4440 &bar0->tda_err_reg, 4441 &sw_stat->tda_err_cnt)) 4442 goto reset; 4443 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR, 4444 &bar0->tda_err_reg, 4445 &sw_stat->tda_err_cnt); 4446 } 4447 /*check for pcc_err*/ 4448 if (val64 & TXDMA_PCC_INT) { 4449 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM | 4450 PCC_N_SERR | PCC_6_COF_OV_ERR | 4451 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR | 4452 PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR | 4453 PCC_TXB_ECC_DB_ERR, 4454 &bar0->pcc_err_reg, 4455 &sw_stat->pcc_err_cnt)) 4456 goto reset; 4457 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR, 4458 &bar0->pcc_err_reg, 4459 &sw_stat->pcc_err_cnt); 4460 } 4461 4462 /*check for tti_err*/ 4463 if (val64 & TXDMA_TTI_INT) { 4464 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, 4465 &bar0->tti_err_reg, 4466 &sw_stat->tti_err_cnt)) 4467 goto reset; 4468 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR, 4469 &bar0->tti_err_reg, 4470 &sw_stat->tti_err_cnt); 4471 } 4472 4473 /*check for lso_err*/ 4474 if (val64 & TXDMA_LSO_INT) { 4475 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT | 4476 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM, 4477 &bar0->lso_err_reg, 4478 &sw_stat->lso_err_cnt)) 4479 goto reset; 4480 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW, 4481 &bar0->lso_err_reg, 4482 &sw_stat->lso_err_cnt); 4483 } 4484 4485 /*check for tpa_err*/ 4486 if (val64 & TXDMA_TPA_INT) { 4487 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, 4488 &bar0->tpa_err_reg, 4489 &sw_stat->tpa_err_cnt)) 4490 goto reset; 4491 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, 4492 &bar0->tpa_err_reg, 4493 &sw_stat->tpa_err_cnt); 4494 } 4495 4496 /*check for sm_err*/ 4497 if (val64 & TXDMA_SM_INT) { 4498 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, 4499 &bar0->sm_err_reg, 4500 &sw_stat->sm_err_cnt)) 4501 goto reset; 4502 } 4503 4504 val64 = readq(&bar0->mac_int_status); 4505 if (val64 & MAC_INT_STATUS_TMAC_INT) { 4506 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR, 4507 &bar0->mac_tmac_err_reg, 4508 &sw_stat->mac_tmac_err_cnt)) 4509 goto reset; 4510 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR | 4511 TMAC_DESC_ECC_SG_ERR | 4512 TMAC_DESC_ECC_DB_ERR, 4513 &bar0->mac_tmac_err_reg, 4514 &sw_stat->mac_tmac_err_cnt); 4515 } 4516 4517 val64 = readq(&bar0->xgxs_int_status); 4518 if (val64 & XGXS_INT_STATUS_TXGXS) { 4519 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR, 4520 &bar0->xgxs_txgxs_err_reg, 4521 &sw_stat->xgxs_txgxs_err_cnt)) 4522 goto reset; 4523 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR, 4524 &bar0->xgxs_txgxs_err_reg, 4525 &sw_stat->xgxs_txgxs_err_cnt); 4526 } 4527 4528 val64 = readq(&bar0->rxdma_int_status); 4529 if (val64 & RXDMA_INT_RC_INT_M) { 4530 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | 4531 RC_FTC_ECC_DB_ERR | 4532 RC_PRCn_SM_ERR_ALARM | 4533 RC_FTC_SM_ERR_ALARM, 4534 &bar0->rc_err_reg, 4535 &sw_stat->rc_err_cnt)) 4536 goto reset; 4537 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | 4538 RC_FTC_ECC_SG_ERR | 4539 RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg, 4540 &sw_stat->rc_err_cnt); 4541 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | 4542 PRC_PCI_AB_WR_Rn | 4543 PRC_PCI_AB_F_WR_Rn, 4544 &bar0->prc_pcix_err_reg, 4545 &sw_stat->prc_pcix_err_cnt)) 4546 goto reset; 4547 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | 4548 PRC_PCI_DP_WR_Rn | 4549 PRC_PCI_DP_F_WR_Rn, 4550 &bar0->prc_pcix_err_reg, 4551 &sw_stat->prc_pcix_err_cnt); 4552 } 4553 4554 if (val64 & RXDMA_INT_RPA_INT_M) { 4555 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR, 4556 &bar0->rpa_err_reg, 4557 &sw_stat->rpa_err_cnt)) 4558 goto reset; 4559 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, 4560 &bar0->rpa_err_reg, 4561 &sw_stat->rpa_err_cnt); 4562 } 4563 4564 if (val64 & RXDMA_INT_RDA_INT_M) { 4565 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR | 4566 RDA_FRM_ECC_DB_N_AERR | 4567 RDA_SM1_ERR_ALARM | 4568 RDA_SM0_ERR_ALARM | 4569 RDA_RXD_ECC_DB_SERR, 4570 &bar0->rda_err_reg, 4571 &sw_stat->rda_err_cnt)) 4572 goto reset; 4573 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | 4574 RDA_FRM_ECC_SG_ERR | 4575 RDA_MISC_ERR | 4576 RDA_PCIX_ERR, 4577 &bar0->rda_err_reg, 4578 &sw_stat->rda_err_cnt); 4579 } 4580 4581 if (val64 & RXDMA_INT_RTI_INT_M) { 4582 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, 4583 &bar0->rti_err_reg, 4584 &sw_stat->rti_err_cnt)) 4585 goto reset; 4586 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR, 4587 &bar0->rti_err_reg, 4588 &sw_stat->rti_err_cnt); 4589 } 4590 4591 val64 = readq(&bar0->mac_int_status); 4592 if (val64 & MAC_INT_STATUS_RMAC_INT) { 4593 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR, 4594 &bar0->mac_rmac_err_reg, 4595 &sw_stat->mac_rmac_err_cnt)) 4596 goto reset; 4597 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT | 4598 RMAC_SINGLE_ECC_ERR | 4599 RMAC_DOUBLE_ECC_ERR, 4600 &bar0->mac_rmac_err_reg, 4601 &sw_stat->mac_rmac_err_cnt); 4602 } 4603 4604 val64 = readq(&bar0->xgxs_int_status); 4605 if (val64 & XGXS_INT_STATUS_RXGXS) { 4606 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, 4607 &bar0->xgxs_rxgxs_err_reg, 4608 &sw_stat->xgxs_rxgxs_err_cnt)) 4609 goto reset; 4610 } 4611 4612 val64 = readq(&bar0->mc_int_status); 4613 if (val64 & MC_INT_STATUS_MC_INT) { 4614 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, 4615 &bar0->mc_err_reg, 4616 &sw_stat->mc_err_cnt)) 4617 goto reset; 4618 4619 /* Handling Ecc errors */ 4620 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) { 4621 writeq(val64, &bar0->mc_err_reg); 4622 if (val64 & MC_ERR_REG_ECC_ALL_DBL) { 4623 sw_stat->double_ecc_errs++; 4624 if (sp->device_type != XFRAME_II_DEVICE) { 4625 /* 4626 * Reset XframeI only if critical error 4627 */ 4628 if (val64 & 4629 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 | 4630 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) 4631 goto reset; 4632 } 4633 } else 4634 sw_stat->single_ecc_errs++; 4635 } 4636 } 4637 return; 4638 4639 reset: 4640 s2io_stop_all_tx_queue(sp); 4641 schedule_work(&sp->rst_timer_task); 4642 sw_stat->soft_reset_cnt++; 4643 } 4644 4645 /** 4646 * s2io_isr - ISR handler of the device . 4647 * @irq: the irq of the device. 4648 * @dev_id: a void pointer to the dev structure of the NIC. 4649 * Description: This function is the ISR handler of the device. It 4650 * identifies the reason for the interrupt and calls the relevant 4651 * service routines. As a contongency measure, this ISR allocates the 4652 * recv buffers, if their numbers are below the panic value which is 4653 * presently set to 25% of the original number of rcv buffers allocated. 4654 * Return value: 4655 * IRQ_HANDLED: will be returned if IRQ was handled by this routine 4656 * IRQ_NONE: will be returned if interrupt is not from our device 4657 */ 4658 static irqreturn_t s2io_isr(int irq, void *dev_id) 4659 { 4660 struct net_device *dev = (struct net_device *)dev_id; 4661 struct s2io_nic *sp = netdev_priv(dev); 4662 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4663 int i; 4664 u64 reason = 0; 4665 struct mac_info *mac_control; 4666 struct config_param *config; 4667 4668 /* Pretend we handled any irq's from a disconnected card */ 4669 if (pci_channel_offline(sp->pdev)) 4670 return IRQ_NONE; 4671 4672 if (!is_s2io_card_up(sp)) 4673 return IRQ_NONE; 4674 4675 config = &sp->config; 4676 mac_control = &sp->mac_control; 4677 4678 /* 4679 * Identify the cause for interrupt and call the appropriate 4680 * interrupt handler. Causes for the interrupt could be; 4681 * 1. Rx of packet. 4682 * 2. Tx complete. 4683 * 3. Link down. 4684 */ 4685 reason = readq(&bar0->general_int_status); 4686 4687 if (unlikely(reason == S2IO_MINUS_ONE)) 4688 return IRQ_HANDLED; /* Nothing much can be done. Get out */ 4689 4690 if (reason & 4691 (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) { 4692 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); 4693 4694 if (config->napi) { 4695 if (reason & GEN_INTR_RXTRAFFIC) { 4696 napi_schedule(&sp->napi); 4697 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); 4698 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 4699 readl(&bar0->rx_traffic_int); 4700 } 4701 } else { 4702 /* 4703 * rx_traffic_int reg is an R1 register, writing all 1's 4704 * will ensure that the actual interrupt causing bit 4705 * get's cleared and hence a read can be avoided. 4706 */ 4707 if (reason & GEN_INTR_RXTRAFFIC) 4708 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 4709 4710 for (i = 0; i < config->rx_ring_num; i++) { 4711 struct ring_info *ring = &mac_control->rings[i]; 4712 4713 rx_intr_handler(ring, 0); 4714 } 4715 } 4716 4717 /* 4718 * tx_traffic_int reg is an R1 register, writing all 1's 4719 * will ensure that the actual interrupt causing bit get's 4720 * cleared and hence a read can be avoided. 4721 */ 4722 if (reason & GEN_INTR_TXTRAFFIC) 4723 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); 4724 4725 for (i = 0; i < config->tx_fifo_num; i++) 4726 tx_intr_handler(&mac_control->fifos[i]); 4727 4728 if (reason & GEN_INTR_TXPIC) 4729 s2io_txpic_intr_handle(sp); 4730 4731 /* 4732 * Reallocate the buffers from the interrupt handler itself. 4733 */ 4734 if (!config->napi) { 4735 for (i = 0; i < config->rx_ring_num; i++) { 4736 struct ring_info *ring = &mac_control->rings[i]; 4737 4738 s2io_chk_rx_buffers(sp, ring); 4739 } 4740 } 4741 writeq(sp->general_int_mask, &bar0->general_int_mask); 4742 readl(&bar0->general_int_status); 4743 4744 return IRQ_HANDLED; 4745 4746 } else if (!reason) { 4747 /* The interrupt was not raised by us */ 4748 return IRQ_NONE; 4749 } 4750 4751 return IRQ_HANDLED; 4752 } 4753 4754 /* 4755 * s2io_updt_stats - 4756 */ 4757 static void s2io_updt_stats(struct s2io_nic *sp) 4758 { 4759 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4760 u64 val64; 4761 int cnt = 0; 4762 4763 if (is_s2io_card_up(sp)) { 4764 /* Apprx 30us on a 133 MHz bus */ 4765 val64 = SET_UPDT_CLICKS(10) | 4766 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN; 4767 writeq(val64, &bar0->stat_cfg); 4768 do { 4769 udelay(100); 4770 val64 = readq(&bar0->stat_cfg); 4771 if (!(val64 & s2BIT(0))) 4772 break; 4773 cnt++; 4774 if (cnt == 5) 4775 break; /* Updt failed */ 4776 } while (1); 4777 } 4778 } 4779 4780 /** 4781 * s2io_get_stats - Updates the device statistics structure. 4782 * @dev : pointer to the device structure. 4783 * Description: 4784 * This function updates the device statistics structure in the s2io_nic 4785 * structure and returns a pointer to the same. 4786 * Return value: 4787 * pointer to the updated net_device_stats structure. 4788 */ 4789 static struct net_device_stats *s2io_get_stats(struct net_device *dev) 4790 { 4791 struct s2io_nic *sp = netdev_priv(dev); 4792 struct mac_info *mac_control = &sp->mac_control; 4793 struct stat_block *stats = mac_control->stats_info; 4794 u64 delta; 4795 4796 /* Configure Stats for immediate updt */ 4797 s2io_updt_stats(sp); 4798 4799 /* A device reset will cause the on-adapter statistics to be zero'ed. 4800 * This can be done while running by changing the MTU. To prevent the 4801 * system from having the stats zero'ed, the driver keeps a copy of the 4802 * last update to the system (which is also zero'ed on reset). This 4803 * enables the driver to accurately know the delta between the last 4804 * update and the current update. 4805 */ 4806 delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 | 4807 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets; 4808 sp->stats.rx_packets += delta; 4809 dev->stats.rx_packets += delta; 4810 4811 delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 | 4812 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets; 4813 sp->stats.tx_packets += delta; 4814 dev->stats.tx_packets += delta; 4815 4816 delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 | 4817 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes; 4818 sp->stats.rx_bytes += delta; 4819 dev->stats.rx_bytes += delta; 4820 4821 delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 | 4822 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes; 4823 sp->stats.tx_bytes += delta; 4824 dev->stats.tx_bytes += delta; 4825 4826 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors; 4827 sp->stats.rx_errors += delta; 4828 dev->stats.rx_errors += delta; 4829 4830 delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 | 4831 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors; 4832 sp->stats.tx_errors += delta; 4833 dev->stats.tx_errors += delta; 4834 4835 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped; 4836 sp->stats.rx_dropped += delta; 4837 dev->stats.rx_dropped += delta; 4838 4839 delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped; 4840 sp->stats.tx_dropped += delta; 4841 dev->stats.tx_dropped += delta; 4842 4843 /* The adapter MAC interprets pause frames as multicast packets, but 4844 * does not pass them up. This erroneously increases the multicast 4845 * packet count and needs to be deducted when the multicast frame count 4846 * is queried. 4847 */ 4848 delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 | 4849 le32_to_cpu(stats->rmac_vld_mcst_frms); 4850 delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms); 4851 delta -= sp->stats.multicast; 4852 sp->stats.multicast += delta; 4853 dev->stats.multicast += delta; 4854 4855 delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 | 4856 le32_to_cpu(stats->rmac_usized_frms)) + 4857 le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors; 4858 sp->stats.rx_length_errors += delta; 4859 dev->stats.rx_length_errors += delta; 4860 4861 delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors; 4862 sp->stats.rx_crc_errors += delta; 4863 dev->stats.rx_crc_errors += delta; 4864 4865 return &dev->stats; 4866 } 4867 4868 /** 4869 * s2io_set_multicast - entry point for multicast address enable/disable. 4870 * @dev : pointer to the device structure 4871 * Description: 4872 * This function is a driver entry point which gets called by the kernel 4873 * whenever multicast addresses must be enabled/disabled. This also gets 4874 * called to set/reset promiscuous mode. Depending on the deivce flag, we 4875 * determine, if multicast address must be enabled or if promiscuous mode 4876 * is to be disabled etc. 4877 * Return value: 4878 * void. 4879 */ 4880 static void s2io_set_multicast(struct net_device *dev, bool may_sleep) 4881 { 4882 int i, j, prev_cnt; 4883 struct netdev_hw_addr *ha; 4884 struct s2io_nic *sp = netdev_priv(dev); 4885 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4886 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask = 4887 0xfeffffffffffULL; 4888 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0; 4889 void __iomem *add; 4890 struct config_param *config = &sp->config; 4891 4892 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) { 4893 /* Enable all Multicast addresses */ 4894 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac), 4895 &bar0->rmac_addr_data0_mem); 4896 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask), 4897 &bar0->rmac_addr_data1_mem); 4898 val64 = RMAC_ADDR_CMD_MEM_WE | 4899 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 4900 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1); 4901 writeq(val64, &bar0->rmac_addr_cmd_mem); 4902 /* Wait till command completes */ 4903 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 4904 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, 4905 S2IO_BIT_RESET, may_sleep); 4906 4907 sp->m_cast_flg = 1; 4908 sp->all_multi_pos = config->max_mc_addr - 1; 4909 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) { 4910 /* Disable all Multicast addresses */ 4911 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr), 4912 &bar0->rmac_addr_data0_mem); 4913 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0), 4914 &bar0->rmac_addr_data1_mem); 4915 val64 = RMAC_ADDR_CMD_MEM_WE | 4916 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 4917 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos); 4918 writeq(val64, &bar0->rmac_addr_cmd_mem); 4919 /* Wait till command completes */ 4920 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 4921 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, 4922 S2IO_BIT_RESET, may_sleep); 4923 4924 sp->m_cast_flg = 0; 4925 sp->all_multi_pos = 0; 4926 } 4927 4928 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) { 4929 /* Put the NIC into promiscuous mode */ 4930 add = &bar0->mac_cfg; 4931 val64 = readq(&bar0->mac_cfg); 4932 val64 |= MAC_CFG_RMAC_PROM_ENABLE; 4933 4934 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 4935 writel((u32)val64, add); 4936 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 4937 writel((u32) (val64 >> 32), (add + 4)); 4938 4939 if (vlan_tag_strip != 1) { 4940 val64 = readq(&bar0->rx_pa_cfg); 4941 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG; 4942 writeq(val64, &bar0->rx_pa_cfg); 4943 sp->vlan_strip_flag = 0; 4944 } 4945 4946 val64 = readq(&bar0->mac_cfg); 4947 sp->promisc_flg = 1; 4948 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n", 4949 dev->name); 4950 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) { 4951 /* Remove the NIC from promiscuous mode */ 4952 add = &bar0->mac_cfg; 4953 val64 = readq(&bar0->mac_cfg); 4954 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE; 4955 4956 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 4957 writel((u32)val64, add); 4958 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 4959 writel((u32) (val64 >> 32), (add + 4)); 4960 4961 if (vlan_tag_strip != 0) { 4962 val64 = readq(&bar0->rx_pa_cfg); 4963 val64 |= RX_PA_CFG_STRIP_VLAN_TAG; 4964 writeq(val64, &bar0->rx_pa_cfg); 4965 sp->vlan_strip_flag = 1; 4966 } 4967 4968 val64 = readq(&bar0->mac_cfg); 4969 sp->promisc_flg = 0; 4970 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name); 4971 } 4972 4973 /* Update individual M_CAST address list */ 4974 if ((!sp->m_cast_flg) && netdev_mc_count(dev)) { 4975 if (netdev_mc_count(dev) > 4976 (config->max_mc_addr - config->max_mac_addr)) { 4977 DBG_PRINT(ERR_DBG, 4978 "%s: No more Rx filters can be added - " 4979 "please enable ALL_MULTI instead\n", 4980 dev->name); 4981 return; 4982 } 4983 4984 prev_cnt = sp->mc_addr_count; 4985 sp->mc_addr_count = netdev_mc_count(dev); 4986 4987 /* Clear out the previous list of Mc in the H/W. */ 4988 for (i = 0; i < prev_cnt; i++) { 4989 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr), 4990 &bar0->rmac_addr_data0_mem); 4991 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL), 4992 &bar0->rmac_addr_data1_mem); 4993 val64 = RMAC_ADDR_CMD_MEM_WE | 4994 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 4995 RMAC_ADDR_CMD_MEM_OFFSET 4996 (config->mc_start_offset + i); 4997 writeq(val64, &bar0->rmac_addr_cmd_mem); 4998 4999 /* Wait for command completes */ 5000 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 5001 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, 5002 S2IO_BIT_RESET, may_sleep)) { 5003 DBG_PRINT(ERR_DBG, 5004 "%s: Adding Multicasts failed\n", 5005 dev->name); 5006 return; 5007 } 5008 } 5009 5010 /* Create the new Rx filter list and update the same in H/W. */ 5011 i = 0; 5012 netdev_for_each_mc_addr(ha, dev) { 5013 mac_addr = 0; 5014 for (j = 0; j < ETH_ALEN; j++) { 5015 mac_addr |= ha->addr[j]; 5016 mac_addr <<= 8; 5017 } 5018 mac_addr >>= 8; 5019 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr), 5020 &bar0->rmac_addr_data0_mem); 5021 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL), 5022 &bar0->rmac_addr_data1_mem); 5023 val64 = RMAC_ADDR_CMD_MEM_WE | 5024 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 5025 RMAC_ADDR_CMD_MEM_OFFSET 5026 (i + config->mc_start_offset); 5027 writeq(val64, &bar0->rmac_addr_cmd_mem); 5028 5029 /* Wait for command completes */ 5030 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 5031 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, 5032 S2IO_BIT_RESET, may_sleep)) { 5033 DBG_PRINT(ERR_DBG, 5034 "%s: Adding Multicasts failed\n", 5035 dev->name); 5036 return; 5037 } 5038 i++; 5039 } 5040 } 5041 } 5042 5043 /* NDO wrapper for s2io_set_multicast */ 5044 static void s2io_ndo_set_multicast(struct net_device *dev) 5045 { 5046 s2io_set_multicast(dev, false); 5047 } 5048 5049 /* read from CAM unicast & multicast addresses and store it in 5050 * def_mac_addr structure 5051 */ 5052 static void do_s2io_store_unicast_mc(struct s2io_nic *sp) 5053 { 5054 int offset; 5055 u64 mac_addr = 0x0; 5056 struct config_param *config = &sp->config; 5057 5058 /* store unicast & multicast mac addresses */ 5059 for (offset = 0; offset < config->max_mc_addr; offset++) { 5060 mac_addr = do_s2io_read_unicast_mc(sp, offset); 5061 /* if read fails disable the entry */ 5062 if (mac_addr == FAILURE) 5063 mac_addr = S2IO_DISABLE_MAC_ENTRY; 5064 do_s2io_copy_mac_addr(sp, offset, mac_addr); 5065 } 5066 } 5067 5068 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */ 5069 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp) 5070 { 5071 int offset; 5072 struct config_param *config = &sp->config; 5073 /* restore unicast mac address */ 5074 for (offset = 0; offset < config->max_mac_addr; offset++) 5075 do_s2io_prog_unicast(sp->dev, 5076 sp->def_mac_addr[offset].mac_addr); 5077 5078 /* restore multicast mac address */ 5079 for (offset = config->mc_start_offset; 5080 offset < config->max_mc_addr; offset++) 5081 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr); 5082 } 5083 5084 /* add a multicast MAC address to CAM */ 5085 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr) 5086 { 5087 int i; 5088 u64 mac_addr = 0; 5089 struct config_param *config = &sp->config; 5090 5091 for (i = 0; i < ETH_ALEN; i++) { 5092 mac_addr <<= 8; 5093 mac_addr |= addr[i]; 5094 } 5095 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY)) 5096 return SUCCESS; 5097 5098 /* check if the multicast mac already preset in CAM */ 5099 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) { 5100 u64 tmp64; 5101 tmp64 = do_s2io_read_unicast_mc(sp, i); 5102 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */ 5103 break; 5104 5105 if (tmp64 == mac_addr) 5106 return SUCCESS; 5107 } 5108 if (i == config->max_mc_addr) { 5109 DBG_PRINT(ERR_DBG, 5110 "CAM full no space left for multicast MAC\n"); 5111 return FAILURE; 5112 } 5113 /* Update the internal structure with this new mac address */ 5114 do_s2io_copy_mac_addr(sp, i, mac_addr); 5115 5116 return do_s2io_add_mac(sp, mac_addr, i); 5117 } 5118 5119 /* add MAC address to CAM */ 5120 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off) 5121 { 5122 u64 val64; 5123 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5124 5125 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr), 5126 &bar0->rmac_addr_data0_mem); 5127 5128 val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 5129 RMAC_ADDR_CMD_MEM_OFFSET(off); 5130 writeq(val64, &bar0->rmac_addr_cmd_mem); 5131 5132 /* Wait till command completes */ 5133 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 5134 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, 5135 S2IO_BIT_RESET, true)) { 5136 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n"); 5137 return FAILURE; 5138 } 5139 return SUCCESS; 5140 } 5141 /* deletes a specified unicast/multicast mac entry from CAM */ 5142 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr) 5143 { 5144 int offset; 5145 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64; 5146 struct config_param *config = &sp->config; 5147 5148 for (offset = 1; 5149 offset < config->max_mc_addr; offset++) { 5150 tmp64 = do_s2io_read_unicast_mc(sp, offset); 5151 if (tmp64 == addr) { 5152 /* disable the entry by writing 0xffffffffffffULL */ 5153 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE) 5154 return FAILURE; 5155 /* store the new mac list from CAM */ 5156 do_s2io_store_unicast_mc(sp); 5157 return SUCCESS; 5158 } 5159 } 5160 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n", 5161 (unsigned long long)addr); 5162 return FAILURE; 5163 } 5164 5165 /* read mac entries from CAM */ 5166 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset) 5167 { 5168 u64 tmp64, val64; 5169 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5170 5171 /* read mac addr */ 5172 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 5173 RMAC_ADDR_CMD_MEM_OFFSET(offset); 5174 writeq(val64, &bar0->rmac_addr_cmd_mem); 5175 5176 /* Wait till command completes */ 5177 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 5178 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, 5179 S2IO_BIT_RESET, true)) { 5180 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n"); 5181 return FAILURE; 5182 } 5183 tmp64 = readq(&bar0->rmac_addr_data0_mem); 5184 5185 return tmp64 >> 16; 5186 } 5187 5188 /* 5189 * s2io_set_mac_addr - driver entry point 5190 */ 5191 5192 static int s2io_set_mac_addr(struct net_device *dev, void *p) 5193 { 5194 struct sockaddr *addr = p; 5195 5196 if (!is_valid_ether_addr(addr->sa_data)) 5197 return -EADDRNOTAVAIL; 5198 5199 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 5200 5201 /* store the MAC address in CAM */ 5202 return do_s2io_prog_unicast(dev, dev->dev_addr); 5203 } 5204 /** 5205 * do_s2io_prog_unicast - Programs the Xframe mac address 5206 * @dev : pointer to the device structure. 5207 * @addr: a uchar pointer to the new mac address which is to be set. 5208 * Description : This procedure will program the Xframe to receive 5209 * frames with new Mac Address 5210 * Return value: SUCCESS on success and an appropriate (-)ve integer 5211 * as defined in errno.h file on failure. 5212 */ 5213 5214 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr) 5215 { 5216 struct s2io_nic *sp = netdev_priv(dev); 5217 register u64 mac_addr = 0, perm_addr = 0; 5218 int i; 5219 u64 tmp64; 5220 struct config_param *config = &sp->config; 5221 5222 /* 5223 * Set the new MAC address as the new unicast filter and reflect this 5224 * change on the device address registered with the OS. It will be 5225 * at offset 0. 5226 */ 5227 for (i = 0; i < ETH_ALEN; i++) { 5228 mac_addr <<= 8; 5229 mac_addr |= addr[i]; 5230 perm_addr <<= 8; 5231 perm_addr |= sp->def_mac_addr[0].mac_addr[i]; 5232 } 5233 5234 /* check if the dev_addr is different than perm_addr */ 5235 if (mac_addr == perm_addr) 5236 return SUCCESS; 5237 5238 /* check if the mac already preset in CAM */ 5239 for (i = 1; i < config->max_mac_addr; i++) { 5240 tmp64 = do_s2io_read_unicast_mc(sp, i); 5241 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */ 5242 break; 5243 5244 if (tmp64 == mac_addr) { 5245 DBG_PRINT(INFO_DBG, 5246 "MAC addr:0x%llx already present in CAM\n", 5247 (unsigned long long)mac_addr); 5248 return SUCCESS; 5249 } 5250 } 5251 if (i == config->max_mac_addr) { 5252 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n"); 5253 return FAILURE; 5254 } 5255 /* Update the internal structure with this new mac address */ 5256 do_s2io_copy_mac_addr(sp, i, mac_addr); 5257 5258 return do_s2io_add_mac(sp, mac_addr, i); 5259 } 5260 5261 /** 5262 * s2io_ethtool_set_link_ksettings - Sets different link parameters. 5263 * @dev : pointer to netdev 5264 * @cmd: pointer to the structure with parameters given by ethtool to set 5265 * link information. 5266 * Description: 5267 * The function sets different link parameters provided by the user onto 5268 * the NIC. 5269 * Return value: 5270 * 0 on success. 5271 */ 5272 5273 static int 5274 s2io_ethtool_set_link_ksettings(struct net_device *dev, 5275 const struct ethtool_link_ksettings *cmd) 5276 { 5277 struct s2io_nic *sp = netdev_priv(dev); 5278 if ((cmd->base.autoneg == AUTONEG_ENABLE) || 5279 (cmd->base.speed != SPEED_10000) || 5280 (cmd->base.duplex != DUPLEX_FULL)) 5281 return -EINVAL; 5282 else { 5283 s2io_close(sp->dev); 5284 s2io_open(sp->dev); 5285 } 5286 5287 return 0; 5288 } 5289 5290 /** 5291 * s2io_ethtol_get_link_ksettings - Return link specific information. 5292 * @dev: pointer to netdev 5293 * @cmd : pointer to the structure with parameters given by ethtool 5294 * to return link information. 5295 * Description: 5296 * Returns link specific information like speed, duplex etc.. to ethtool. 5297 * Return value : 5298 * return 0 on success. 5299 */ 5300 5301 static int 5302 s2io_ethtool_get_link_ksettings(struct net_device *dev, 5303 struct ethtool_link_ksettings *cmd) 5304 { 5305 struct s2io_nic *sp = netdev_priv(dev); 5306 5307 ethtool_link_ksettings_zero_link_mode(cmd, supported); 5308 ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full); 5309 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); 5310 5311 ethtool_link_ksettings_zero_link_mode(cmd, advertising); 5312 ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); 5313 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); 5314 5315 cmd->base.port = PORT_FIBRE; 5316 5317 if (netif_carrier_ok(sp->dev)) { 5318 cmd->base.speed = SPEED_10000; 5319 cmd->base.duplex = DUPLEX_FULL; 5320 } else { 5321 cmd->base.speed = SPEED_UNKNOWN; 5322 cmd->base.duplex = DUPLEX_UNKNOWN; 5323 } 5324 5325 cmd->base.autoneg = AUTONEG_DISABLE; 5326 return 0; 5327 } 5328 5329 /** 5330 * s2io_ethtool_gdrvinfo - Returns driver specific information. 5331 * @dev: pointer to netdev 5332 * @info : pointer to the structure with parameters given by ethtool to 5333 * return driver information. 5334 * Description: 5335 * Returns driver specefic information like name, version etc.. to ethtool. 5336 * Return value: 5337 * void 5338 */ 5339 5340 static void s2io_ethtool_gdrvinfo(struct net_device *dev, 5341 struct ethtool_drvinfo *info) 5342 { 5343 struct s2io_nic *sp = netdev_priv(dev); 5344 5345 strlcpy(info->driver, s2io_driver_name, sizeof(info->driver)); 5346 strlcpy(info->version, s2io_driver_version, sizeof(info->version)); 5347 strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info)); 5348 } 5349 5350 /** 5351 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer. 5352 * @dev: pointer to netdev 5353 * @regs : pointer to the structure with parameters given by ethtool for 5354 * dumping the registers. 5355 * @space: The input argument into which all the registers are dumped. 5356 * Description: 5357 * Dumps the entire register space of xFrame NIC into the user given 5358 * buffer area. 5359 * Return value : 5360 * void . 5361 */ 5362 5363 static void s2io_ethtool_gregs(struct net_device *dev, 5364 struct ethtool_regs *regs, void *space) 5365 { 5366 int i; 5367 u64 reg; 5368 u8 *reg_space = (u8 *)space; 5369 struct s2io_nic *sp = netdev_priv(dev); 5370 5371 regs->len = XENA_REG_SPACE; 5372 regs->version = sp->pdev->subsystem_device; 5373 5374 for (i = 0; i < regs->len; i += 8) { 5375 reg = readq(sp->bar0 + i); 5376 memcpy((reg_space + i), ®, 8); 5377 } 5378 } 5379 5380 /* 5381 * s2io_set_led - control NIC led 5382 */ 5383 static void s2io_set_led(struct s2io_nic *sp, bool on) 5384 { 5385 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5386 u16 subid = sp->pdev->subsystem_device; 5387 u64 val64; 5388 5389 if ((sp->device_type == XFRAME_II_DEVICE) || 5390 ((subid & 0xFF) >= 0x07)) { 5391 val64 = readq(&bar0->gpio_control); 5392 if (on) 5393 val64 |= GPIO_CTRL_GPIO_0; 5394 else 5395 val64 &= ~GPIO_CTRL_GPIO_0; 5396 5397 writeq(val64, &bar0->gpio_control); 5398 } else { 5399 val64 = readq(&bar0->adapter_control); 5400 if (on) 5401 val64 |= ADAPTER_LED_ON; 5402 else 5403 val64 &= ~ADAPTER_LED_ON; 5404 5405 writeq(val64, &bar0->adapter_control); 5406 } 5407 5408 } 5409 5410 /** 5411 * s2io_ethtool_set_led - To physically identify the nic on the system. 5412 * @dev : network device 5413 * @state: led setting 5414 * 5415 * Description: Used to physically identify the NIC on the system. 5416 * The Link LED will blink for a time specified by the user for 5417 * identification. 5418 * NOTE: The Link has to be Up to be able to blink the LED. Hence 5419 * identification is possible only if it's link is up. 5420 */ 5421 5422 static int s2io_ethtool_set_led(struct net_device *dev, 5423 enum ethtool_phys_id_state state) 5424 { 5425 struct s2io_nic *sp = netdev_priv(dev); 5426 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5427 u16 subid = sp->pdev->subsystem_device; 5428 5429 if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) { 5430 u64 val64 = readq(&bar0->adapter_control); 5431 if (!(val64 & ADAPTER_CNTL_EN)) { 5432 pr_err("Adapter Link down, cannot blink LED\n"); 5433 return -EAGAIN; 5434 } 5435 } 5436 5437 switch (state) { 5438 case ETHTOOL_ID_ACTIVE: 5439 sp->adapt_ctrl_org = readq(&bar0->gpio_control); 5440 return 1; /* cycle on/off once per second */ 5441 5442 case ETHTOOL_ID_ON: 5443 s2io_set_led(sp, true); 5444 break; 5445 5446 case ETHTOOL_ID_OFF: 5447 s2io_set_led(sp, false); 5448 break; 5449 5450 case ETHTOOL_ID_INACTIVE: 5451 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) 5452 writeq(sp->adapt_ctrl_org, &bar0->gpio_control); 5453 } 5454 5455 return 0; 5456 } 5457 5458 static void s2io_ethtool_gringparam(struct net_device *dev, 5459 struct ethtool_ringparam *ering) 5460 { 5461 struct s2io_nic *sp = netdev_priv(dev); 5462 int i, tx_desc_count = 0, rx_desc_count = 0; 5463 5464 if (sp->rxd_mode == RXD_MODE_1) { 5465 ering->rx_max_pending = MAX_RX_DESC_1; 5466 ering->rx_jumbo_max_pending = MAX_RX_DESC_1; 5467 } else { 5468 ering->rx_max_pending = MAX_RX_DESC_2; 5469 ering->rx_jumbo_max_pending = MAX_RX_DESC_2; 5470 } 5471 5472 ering->tx_max_pending = MAX_TX_DESC; 5473 5474 for (i = 0; i < sp->config.rx_ring_num; i++) 5475 rx_desc_count += sp->config.rx_cfg[i].num_rxd; 5476 ering->rx_pending = rx_desc_count; 5477 ering->rx_jumbo_pending = rx_desc_count; 5478 5479 for (i = 0; i < sp->config.tx_fifo_num; i++) 5480 tx_desc_count += sp->config.tx_cfg[i].fifo_len; 5481 ering->tx_pending = tx_desc_count; 5482 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds); 5483 } 5484 5485 /** 5486 * s2io_ethtool_getpause_data -Pause frame frame generation and reception. 5487 * @dev: pointer to netdev 5488 * @ep : pointer to the structure with pause parameters given by ethtool. 5489 * Description: 5490 * Returns the Pause frame generation and reception capability of the NIC. 5491 * Return value: 5492 * void 5493 */ 5494 static void s2io_ethtool_getpause_data(struct net_device *dev, 5495 struct ethtool_pauseparam *ep) 5496 { 5497 u64 val64; 5498 struct s2io_nic *sp = netdev_priv(dev); 5499 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5500 5501 val64 = readq(&bar0->rmac_pause_cfg); 5502 if (val64 & RMAC_PAUSE_GEN_ENABLE) 5503 ep->tx_pause = true; 5504 if (val64 & RMAC_PAUSE_RX_ENABLE) 5505 ep->rx_pause = true; 5506 ep->autoneg = false; 5507 } 5508 5509 /** 5510 * s2io_ethtool_setpause_data - set/reset pause frame generation. 5511 * @dev: pointer to netdev 5512 * @ep : pointer to the structure with pause parameters given by ethtool. 5513 * Description: 5514 * It can be used to set or reset Pause frame generation or reception 5515 * support of the NIC. 5516 * Return value: 5517 * int, returns 0 on Success 5518 */ 5519 5520 static int s2io_ethtool_setpause_data(struct net_device *dev, 5521 struct ethtool_pauseparam *ep) 5522 { 5523 u64 val64; 5524 struct s2io_nic *sp = netdev_priv(dev); 5525 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5526 5527 val64 = readq(&bar0->rmac_pause_cfg); 5528 if (ep->tx_pause) 5529 val64 |= RMAC_PAUSE_GEN_ENABLE; 5530 else 5531 val64 &= ~RMAC_PAUSE_GEN_ENABLE; 5532 if (ep->rx_pause) 5533 val64 |= RMAC_PAUSE_RX_ENABLE; 5534 else 5535 val64 &= ~RMAC_PAUSE_RX_ENABLE; 5536 writeq(val64, &bar0->rmac_pause_cfg); 5537 return 0; 5538 } 5539 5540 #define S2IO_DEV_ID 5 5541 /** 5542 * read_eeprom - reads 4 bytes of data from user given offset. 5543 * @sp : private member of the device structure, which is a pointer to the 5544 * s2io_nic structure. 5545 * @off : offset at which the data must be written 5546 * @data : Its an output parameter where the data read at the given 5547 * offset is stored. 5548 * Description: 5549 * Will read 4 bytes of data from the user given offset and return the 5550 * read data. 5551 * NOTE: Will allow to read only part of the EEPROM visible through the 5552 * I2C bus. 5553 * Return value: 5554 * -1 on failure and 0 on success. 5555 */ 5556 static int read_eeprom(struct s2io_nic *sp, int off, u64 *data) 5557 { 5558 int ret = -1; 5559 u32 exit_cnt = 0; 5560 u64 val64; 5561 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5562 5563 if (sp->device_type == XFRAME_I_DEVICE) { 5564 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | 5565 I2C_CONTROL_ADDR(off) | 5566 I2C_CONTROL_BYTE_CNT(0x3) | 5567 I2C_CONTROL_READ | 5568 I2C_CONTROL_CNTL_START; 5569 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); 5570 5571 while (exit_cnt < 5) { 5572 val64 = readq(&bar0->i2c_control); 5573 if (I2C_CONTROL_CNTL_END(val64)) { 5574 *data = I2C_CONTROL_GET_DATA(val64); 5575 ret = 0; 5576 break; 5577 } 5578 msleep(50); 5579 exit_cnt++; 5580 } 5581 } 5582 5583 if (sp->device_type == XFRAME_II_DEVICE) { 5584 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 | 5585 SPI_CONTROL_BYTECNT(0x3) | 5586 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off); 5587 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); 5588 val64 |= SPI_CONTROL_REQ; 5589 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); 5590 while (exit_cnt < 5) { 5591 val64 = readq(&bar0->spi_control); 5592 if (val64 & SPI_CONTROL_NACK) { 5593 ret = 1; 5594 break; 5595 } else if (val64 & SPI_CONTROL_DONE) { 5596 *data = readq(&bar0->spi_data); 5597 *data &= 0xffffff; 5598 ret = 0; 5599 break; 5600 } 5601 msleep(50); 5602 exit_cnt++; 5603 } 5604 } 5605 return ret; 5606 } 5607 5608 /** 5609 * write_eeprom - actually writes the relevant part of the data value. 5610 * @sp : private member of the device structure, which is a pointer to the 5611 * s2io_nic structure. 5612 * @off : offset at which the data must be written 5613 * @data : The data that is to be written 5614 * @cnt : Number of bytes of the data that are actually to be written into 5615 * the Eeprom. (max of 3) 5616 * Description: 5617 * Actually writes the relevant part of the data value into the Eeprom 5618 * through the I2C bus. 5619 * Return value: 5620 * 0 on success, -1 on failure. 5621 */ 5622 5623 static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt) 5624 { 5625 int exit_cnt = 0, ret = -1; 5626 u64 val64; 5627 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5628 5629 if (sp->device_type == XFRAME_I_DEVICE) { 5630 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | 5631 I2C_CONTROL_ADDR(off) | 5632 I2C_CONTROL_BYTE_CNT(cnt) | 5633 I2C_CONTROL_SET_DATA((u32)data) | 5634 I2C_CONTROL_CNTL_START; 5635 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); 5636 5637 while (exit_cnt < 5) { 5638 val64 = readq(&bar0->i2c_control); 5639 if (I2C_CONTROL_CNTL_END(val64)) { 5640 if (!(val64 & I2C_CONTROL_NACK)) 5641 ret = 0; 5642 break; 5643 } 5644 msleep(50); 5645 exit_cnt++; 5646 } 5647 } 5648 5649 if (sp->device_type == XFRAME_II_DEVICE) { 5650 int write_cnt = (cnt == 8) ? 0 : cnt; 5651 writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data); 5652 5653 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 | 5654 SPI_CONTROL_BYTECNT(write_cnt) | 5655 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off); 5656 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); 5657 val64 |= SPI_CONTROL_REQ; 5658 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); 5659 while (exit_cnt < 5) { 5660 val64 = readq(&bar0->spi_control); 5661 if (val64 & SPI_CONTROL_NACK) { 5662 ret = 1; 5663 break; 5664 } else if (val64 & SPI_CONTROL_DONE) { 5665 ret = 0; 5666 break; 5667 } 5668 msleep(50); 5669 exit_cnt++; 5670 } 5671 } 5672 return ret; 5673 } 5674 static void s2io_vpd_read(struct s2io_nic *nic) 5675 { 5676 u8 *vpd_data; 5677 u8 data; 5678 int i = 0, cnt, len, fail = 0; 5679 int vpd_addr = 0x80; 5680 struct swStat *swstats = &nic->mac_control.stats_info->sw_stat; 5681 5682 if (nic->device_type == XFRAME_II_DEVICE) { 5683 strcpy(nic->product_name, "Xframe II 10GbE network adapter"); 5684 vpd_addr = 0x80; 5685 } else { 5686 strcpy(nic->product_name, "Xframe I 10GbE network adapter"); 5687 vpd_addr = 0x50; 5688 } 5689 strcpy(nic->serial_num, "NOT AVAILABLE"); 5690 5691 vpd_data = kmalloc(256, GFP_KERNEL); 5692 if (!vpd_data) { 5693 swstats->mem_alloc_fail_cnt++; 5694 return; 5695 } 5696 swstats->mem_allocated += 256; 5697 5698 for (i = 0; i < 256; i += 4) { 5699 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); 5700 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); 5701 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0); 5702 for (cnt = 0; cnt < 5; cnt++) { 5703 msleep(2); 5704 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data); 5705 if (data == 0x80) 5706 break; 5707 } 5708 if (cnt >= 5) { 5709 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n"); 5710 fail = 1; 5711 break; 5712 } 5713 pci_read_config_dword(nic->pdev, (vpd_addr + 4), 5714 (u32 *)&vpd_data[i]); 5715 } 5716 5717 if (!fail) { 5718 /* read serial number of adapter */ 5719 for (cnt = 0; cnt < 252; cnt++) { 5720 if ((vpd_data[cnt] == 'S') && 5721 (vpd_data[cnt+1] == 'N')) { 5722 len = vpd_data[cnt+2]; 5723 if (len < min(VPD_STRING_LEN, 256-cnt-2)) { 5724 memcpy(nic->serial_num, 5725 &vpd_data[cnt + 3], 5726 len); 5727 memset(nic->serial_num+len, 5728 0, 5729 VPD_STRING_LEN-len); 5730 break; 5731 } 5732 } 5733 } 5734 } 5735 5736 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) { 5737 len = vpd_data[1]; 5738 memcpy(nic->product_name, &vpd_data[3], len); 5739 nic->product_name[len] = 0; 5740 } 5741 kfree(vpd_data); 5742 swstats->mem_freed += 256; 5743 } 5744 5745 /** 5746 * s2io_ethtool_geeprom - reads the value stored in the Eeprom. 5747 * @dev: pointer to netdev 5748 * @eeprom : pointer to the user level structure provided by ethtool, 5749 * containing all relevant information. 5750 * @data_buf : user defined value to be written into Eeprom. 5751 * Description: Reads the values stored in the Eeprom at given offset 5752 * for a given length. Stores these values int the input argument data 5753 * buffer 'data_buf' and returns these to the caller (ethtool.) 5754 * Return value: 5755 * int 0 on success 5756 */ 5757 5758 static int s2io_ethtool_geeprom(struct net_device *dev, 5759 struct ethtool_eeprom *eeprom, u8 * data_buf) 5760 { 5761 u32 i, valid; 5762 u64 data; 5763 struct s2io_nic *sp = netdev_priv(dev); 5764 5765 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16); 5766 5767 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE)) 5768 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset; 5769 5770 for (i = 0; i < eeprom->len; i += 4) { 5771 if (read_eeprom(sp, (eeprom->offset + i), &data)) { 5772 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n"); 5773 return -EFAULT; 5774 } 5775 valid = INV(data); 5776 memcpy((data_buf + i), &valid, 4); 5777 } 5778 return 0; 5779 } 5780 5781 /** 5782 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom 5783 * @dev: pointer to netdev 5784 * @eeprom : pointer to the user level structure provided by ethtool, 5785 * containing all relevant information. 5786 * @data_buf : user defined value to be written into Eeprom. 5787 * Description: 5788 * Tries to write the user provided value in the Eeprom, at the offset 5789 * given by the user. 5790 * Return value: 5791 * 0 on success, -EFAULT on failure. 5792 */ 5793 5794 static int s2io_ethtool_seeprom(struct net_device *dev, 5795 struct ethtool_eeprom *eeprom, 5796 u8 *data_buf) 5797 { 5798 int len = eeprom->len, cnt = 0; 5799 u64 valid = 0, data; 5800 struct s2io_nic *sp = netdev_priv(dev); 5801 5802 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) { 5803 DBG_PRINT(ERR_DBG, 5804 "ETHTOOL_WRITE_EEPROM Err: " 5805 "Magic value is wrong, it is 0x%x should be 0x%x\n", 5806 (sp->pdev->vendor | (sp->pdev->device << 16)), 5807 eeprom->magic); 5808 return -EFAULT; 5809 } 5810 5811 while (len) { 5812 data = (u32)data_buf[cnt] & 0x000000FF; 5813 if (data) 5814 valid = (u32)(data << 24); 5815 else 5816 valid = data; 5817 5818 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) { 5819 DBG_PRINT(ERR_DBG, 5820 "ETHTOOL_WRITE_EEPROM Err: " 5821 "Cannot write into the specified offset\n"); 5822 return -EFAULT; 5823 } 5824 cnt++; 5825 len--; 5826 } 5827 5828 return 0; 5829 } 5830 5831 /** 5832 * s2io_register_test - reads and writes into all clock domains. 5833 * @sp : private member of the device structure, which is a pointer to the 5834 * s2io_nic structure. 5835 * @data : variable that returns the result of each of the test conducted b 5836 * by the driver. 5837 * Description: 5838 * Read and write into all clock domains. The NIC has 3 clock domains, 5839 * see that registers in all the three regions are accessible. 5840 * Return value: 5841 * 0 on success. 5842 */ 5843 5844 static int s2io_register_test(struct s2io_nic *sp, uint64_t *data) 5845 { 5846 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5847 u64 val64 = 0, exp_val; 5848 int fail = 0; 5849 5850 val64 = readq(&bar0->pif_rd_swapper_fb); 5851 if (val64 != 0x123456789abcdefULL) { 5852 fail = 1; 5853 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1); 5854 } 5855 5856 val64 = readq(&bar0->rmac_pause_cfg); 5857 if (val64 != 0xc000ffff00000000ULL) { 5858 fail = 1; 5859 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2); 5860 } 5861 5862 val64 = readq(&bar0->rx_queue_cfg); 5863 if (sp->device_type == XFRAME_II_DEVICE) 5864 exp_val = 0x0404040404040404ULL; 5865 else 5866 exp_val = 0x0808080808080808ULL; 5867 if (val64 != exp_val) { 5868 fail = 1; 5869 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3); 5870 } 5871 5872 val64 = readq(&bar0->xgxs_efifo_cfg); 5873 if (val64 != 0x000000001923141EULL) { 5874 fail = 1; 5875 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4); 5876 } 5877 5878 val64 = 0x5A5A5A5A5A5A5A5AULL; 5879 writeq(val64, &bar0->xmsi_data); 5880 val64 = readq(&bar0->xmsi_data); 5881 if (val64 != 0x5A5A5A5A5A5A5A5AULL) { 5882 fail = 1; 5883 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1); 5884 } 5885 5886 val64 = 0xA5A5A5A5A5A5A5A5ULL; 5887 writeq(val64, &bar0->xmsi_data); 5888 val64 = readq(&bar0->xmsi_data); 5889 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) { 5890 fail = 1; 5891 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2); 5892 } 5893 5894 *data = fail; 5895 return fail; 5896 } 5897 5898 /** 5899 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed. 5900 * @sp : private member of the device structure, which is a pointer to the 5901 * s2io_nic structure. 5902 * @data:variable that returns the result of each of the test conducted by 5903 * the driver. 5904 * Description: 5905 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL 5906 * register. 5907 * Return value: 5908 * 0 on success. 5909 */ 5910 5911 static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data) 5912 { 5913 int fail = 0; 5914 u64 ret_data, org_4F0, org_7F0; 5915 u8 saved_4F0 = 0, saved_7F0 = 0; 5916 struct net_device *dev = sp->dev; 5917 5918 /* Test Write Error at offset 0 */ 5919 /* Note that SPI interface allows write access to all areas 5920 * of EEPROM. Hence doing all negative testing only for Xframe I. 5921 */ 5922 if (sp->device_type == XFRAME_I_DEVICE) 5923 if (!write_eeprom(sp, 0, 0, 3)) 5924 fail = 1; 5925 5926 /* Save current values at offsets 0x4F0 and 0x7F0 */ 5927 if (!read_eeprom(sp, 0x4F0, &org_4F0)) 5928 saved_4F0 = 1; 5929 if (!read_eeprom(sp, 0x7F0, &org_7F0)) 5930 saved_7F0 = 1; 5931 5932 /* Test Write at offset 4f0 */ 5933 if (write_eeprom(sp, 0x4F0, 0x012345, 3)) 5934 fail = 1; 5935 if (read_eeprom(sp, 0x4F0, &ret_data)) 5936 fail = 1; 5937 5938 if (ret_data != 0x012345) { 5939 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. " 5940 "Data written %llx Data read %llx\n", 5941 dev->name, (unsigned long long)0x12345, 5942 (unsigned long long)ret_data); 5943 fail = 1; 5944 } 5945 5946 /* Reset the EEPROM data go FFFF */ 5947 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3); 5948 5949 /* Test Write Request Error at offset 0x7c */ 5950 if (sp->device_type == XFRAME_I_DEVICE) 5951 if (!write_eeprom(sp, 0x07C, 0, 3)) 5952 fail = 1; 5953 5954 /* Test Write Request at offset 0x7f0 */ 5955 if (write_eeprom(sp, 0x7F0, 0x012345, 3)) 5956 fail = 1; 5957 if (read_eeprom(sp, 0x7F0, &ret_data)) 5958 fail = 1; 5959 5960 if (ret_data != 0x012345) { 5961 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. " 5962 "Data written %llx Data read %llx\n", 5963 dev->name, (unsigned long long)0x12345, 5964 (unsigned long long)ret_data); 5965 fail = 1; 5966 } 5967 5968 /* Reset the EEPROM data go FFFF */ 5969 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3); 5970 5971 if (sp->device_type == XFRAME_I_DEVICE) { 5972 /* Test Write Error at offset 0x80 */ 5973 if (!write_eeprom(sp, 0x080, 0, 3)) 5974 fail = 1; 5975 5976 /* Test Write Error at offset 0xfc */ 5977 if (!write_eeprom(sp, 0x0FC, 0, 3)) 5978 fail = 1; 5979 5980 /* Test Write Error at offset 0x100 */ 5981 if (!write_eeprom(sp, 0x100, 0, 3)) 5982 fail = 1; 5983 5984 /* Test Write Error at offset 4ec */ 5985 if (!write_eeprom(sp, 0x4EC, 0, 3)) 5986 fail = 1; 5987 } 5988 5989 /* Restore values at offsets 0x4F0 and 0x7F0 */ 5990 if (saved_4F0) 5991 write_eeprom(sp, 0x4F0, org_4F0, 3); 5992 if (saved_7F0) 5993 write_eeprom(sp, 0x7F0, org_7F0, 3); 5994 5995 *data = fail; 5996 return fail; 5997 } 5998 5999 /** 6000 * s2io_bist_test - invokes the MemBist test of the card . 6001 * @sp : private member of the device structure, which is a pointer to the 6002 * s2io_nic structure. 6003 * @data:variable that returns the result of each of the test conducted by 6004 * the driver. 6005 * Description: 6006 * This invokes the MemBist test of the card. We give around 6007 * 2 secs time for the Test to complete. If it's still not complete 6008 * within this peiod, we consider that the test failed. 6009 * Return value: 6010 * 0 on success and -1 on failure. 6011 */ 6012 6013 static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data) 6014 { 6015 u8 bist = 0; 6016 int cnt = 0, ret = -1; 6017 6018 pci_read_config_byte(sp->pdev, PCI_BIST, &bist); 6019 bist |= PCI_BIST_START; 6020 pci_write_config_word(sp->pdev, PCI_BIST, bist); 6021 6022 while (cnt < 20) { 6023 pci_read_config_byte(sp->pdev, PCI_BIST, &bist); 6024 if (!(bist & PCI_BIST_START)) { 6025 *data = (bist & PCI_BIST_CODE_MASK); 6026 ret = 0; 6027 break; 6028 } 6029 msleep(100); 6030 cnt++; 6031 } 6032 6033 return ret; 6034 } 6035 6036 /** 6037 * s2io_link_test - verifies the link state of the nic 6038 * @sp: private member of the device structure, which is a pointer to the 6039 * s2io_nic structure. 6040 * @data: variable that returns the result of each of the test conducted by 6041 * the driver. 6042 * Description: 6043 * The function verifies the link state of the NIC and updates the input 6044 * argument 'data' appropriately. 6045 * Return value: 6046 * 0 on success. 6047 */ 6048 6049 static int s2io_link_test(struct s2io_nic *sp, uint64_t *data) 6050 { 6051 struct XENA_dev_config __iomem *bar0 = sp->bar0; 6052 u64 val64; 6053 6054 val64 = readq(&bar0->adapter_status); 6055 if (!(LINK_IS_UP(val64))) 6056 *data = 1; 6057 else 6058 *data = 0; 6059 6060 return *data; 6061 } 6062 6063 /** 6064 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC 6065 * @sp: private member of the device structure, which is a pointer to the 6066 * s2io_nic structure. 6067 * @data: variable that returns the result of each of the test 6068 * conducted by the driver. 6069 * Description: 6070 * This is one of the offline test that tests the read and write 6071 * access to the RldRam chip on the NIC. 6072 * Return value: 6073 * 0 on success. 6074 */ 6075 6076 static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data) 6077 { 6078 struct XENA_dev_config __iomem *bar0 = sp->bar0; 6079 u64 val64; 6080 int cnt, iteration = 0, test_fail = 0; 6081 6082 val64 = readq(&bar0->adapter_control); 6083 val64 &= ~ADAPTER_ECC_EN; 6084 writeq(val64, &bar0->adapter_control); 6085 6086 val64 = readq(&bar0->mc_rldram_test_ctrl); 6087 val64 |= MC_RLDRAM_TEST_MODE; 6088 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); 6089 6090 val64 = readq(&bar0->mc_rldram_mrs); 6091 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE; 6092 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF); 6093 6094 val64 |= MC_RLDRAM_MRS_ENABLE; 6095 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF); 6096 6097 while (iteration < 2) { 6098 val64 = 0x55555555aaaa0000ULL; 6099 if (iteration == 1) 6100 val64 ^= 0xFFFFFFFFFFFF0000ULL; 6101 writeq(val64, &bar0->mc_rldram_test_d0); 6102 6103 val64 = 0xaaaa5a5555550000ULL; 6104 if (iteration == 1) 6105 val64 ^= 0xFFFFFFFFFFFF0000ULL; 6106 writeq(val64, &bar0->mc_rldram_test_d1); 6107 6108 val64 = 0x55aaaaaaaa5a0000ULL; 6109 if (iteration == 1) 6110 val64 ^= 0xFFFFFFFFFFFF0000ULL; 6111 writeq(val64, &bar0->mc_rldram_test_d2); 6112 6113 val64 = (u64) (0x0000003ffffe0100ULL); 6114 writeq(val64, &bar0->mc_rldram_test_add); 6115 6116 val64 = MC_RLDRAM_TEST_MODE | 6117 MC_RLDRAM_TEST_WRITE | 6118 MC_RLDRAM_TEST_GO; 6119 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); 6120 6121 for (cnt = 0; cnt < 5; cnt++) { 6122 val64 = readq(&bar0->mc_rldram_test_ctrl); 6123 if (val64 & MC_RLDRAM_TEST_DONE) 6124 break; 6125 msleep(200); 6126 } 6127 6128 if (cnt == 5) 6129 break; 6130 6131 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO; 6132 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); 6133 6134 for (cnt = 0; cnt < 5; cnt++) { 6135 val64 = readq(&bar0->mc_rldram_test_ctrl); 6136 if (val64 & MC_RLDRAM_TEST_DONE) 6137 break; 6138 msleep(500); 6139 } 6140 6141 if (cnt == 5) 6142 break; 6143 6144 val64 = readq(&bar0->mc_rldram_test_ctrl); 6145 if (!(val64 & MC_RLDRAM_TEST_PASS)) 6146 test_fail = 1; 6147 6148 iteration++; 6149 } 6150 6151 *data = test_fail; 6152 6153 /* Bring the adapter out of test mode */ 6154 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF); 6155 6156 return test_fail; 6157 } 6158 6159 /** 6160 * s2io_ethtool_test - conducts 6 tsets to determine the health of card. 6161 * @dev: pointer to netdev 6162 * @ethtest : pointer to a ethtool command specific structure that will be 6163 * returned to the user. 6164 * @data : variable that returns the result of each of the test 6165 * conducted by the driver. 6166 * Description: 6167 * This function conducts 6 tests ( 4 offline and 2 online) to determine 6168 * the health of the card. 6169 * Return value: 6170 * void 6171 */ 6172 6173 static void s2io_ethtool_test(struct net_device *dev, 6174 struct ethtool_test *ethtest, 6175 uint64_t *data) 6176 { 6177 struct s2io_nic *sp = netdev_priv(dev); 6178 int orig_state = netif_running(sp->dev); 6179 6180 if (ethtest->flags == ETH_TEST_FL_OFFLINE) { 6181 /* Offline Tests. */ 6182 if (orig_state) 6183 s2io_close(sp->dev); 6184 6185 if (s2io_register_test(sp, &data[0])) 6186 ethtest->flags |= ETH_TEST_FL_FAILED; 6187 6188 s2io_reset(sp); 6189 6190 if (s2io_rldram_test(sp, &data[3])) 6191 ethtest->flags |= ETH_TEST_FL_FAILED; 6192 6193 s2io_reset(sp); 6194 6195 if (s2io_eeprom_test(sp, &data[1])) 6196 ethtest->flags |= ETH_TEST_FL_FAILED; 6197 6198 if (s2io_bist_test(sp, &data[4])) 6199 ethtest->flags |= ETH_TEST_FL_FAILED; 6200 6201 if (orig_state) 6202 s2io_open(sp->dev); 6203 6204 data[2] = 0; 6205 } else { 6206 /* Online Tests. */ 6207 if (!orig_state) { 6208 DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n", 6209 dev->name); 6210 data[0] = -1; 6211 data[1] = -1; 6212 data[2] = -1; 6213 data[3] = -1; 6214 data[4] = -1; 6215 } 6216 6217 if (s2io_link_test(sp, &data[2])) 6218 ethtest->flags |= ETH_TEST_FL_FAILED; 6219 6220 data[0] = 0; 6221 data[1] = 0; 6222 data[3] = 0; 6223 data[4] = 0; 6224 } 6225 } 6226 6227 static void s2io_get_ethtool_stats(struct net_device *dev, 6228 struct ethtool_stats *estats, 6229 u64 *tmp_stats) 6230 { 6231 int i = 0, k; 6232 struct s2io_nic *sp = netdev_priv(dev); 6233 struct stat_block *stats = sp->mac_control.stats_info; 6234 struct swStat *swstats = &stats->sw_stat; 6235 struct xpakStat *xstats = &stats->xpak_stat; 6236 6237 s2io_updt_stats(sp); 6238 tmp_stats[i++] = 6239 (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 | 6240 le32_to_cpu(stats->tmac_frms); 6241 tmp_stats[i++] = 6242 (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 | 6243 le32_to_cpu(stats->tmac_data_octets); 6244 tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms); 6245 tmp_stats[i++] = 6246 (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 | 6247 le32_to_cpu(stats->tmac_mcst_frms); 6248 tmp_stats[i++] = 6249 (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 | 6250 le32_to_cpu(stats->tmac_bcst_frms); 6251 tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms); 6252 tmp_stats[i++] = 6253 (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 | 6254 le32_to_cpu(stats->tmac_ttl_octets); 6255 tmp_stats[i++] = 6256 (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 | 6257 le32_to_cpu(stats->tmac_ucst_frms); 6258 tmp_stats[i++] = 6259 (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 | 6260 le32_to_cpu(stats->tmac_nucst_frms); 6261 tmp_stats[i++] = 6262 (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 | 6263 le32_to_cpu(stats->tmac_any_err_frms); 6264 tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets); 6265 tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets); 6266 tmp_stats[i++] = 6267 (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 | 6268 le32_to_cpu(stats->tmac_vld_ip); 6269 tmp_stats[i++] = 6270 (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 | 6271 le32_to_cpu(stats->tmac_drop_ip); 6272 tmp_stats[i++] = 6273 (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 | 6274 le32_to_cpu(stats->tmac_icmp); 6275 tmp_stats[i++] = 6276 (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 | 6277 le32_to_cpu(stats->tmac_rst_tcp); 6278 tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp); 6279 tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 | 6280 le32_to_cpu(stats->tmac_udp); 6281 tmp_stats[i++] = 6282 (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 | 6283 le32_to_cpu(stats->rmac_vld_frms); 6284 tmp_stats[i++] = 6285 (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 | 6286 le32_to_cpu(stats->rmac_data_octets); 6287 tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms); 6288 tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms); 6289 tmp_stats[i++] = 6290 (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 | 6291 le32_to_cpu(stats->rmac_vld_mcst_frms); 6292 tmp_stats[i++] = 6293 (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 | 6294 le32_to_cpu(stats->rmac_vld_bcst_frms); 6295 tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms); 6296 tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms); 6297 tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms); 6298 tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms); 6299 tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms); 6300 tmp_stats[i++] = 6301 (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 | 6302 le32_to_cpu(stats->rmac_ttl_octets); 6303 tmp_stats[i++] = 6304 (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32 6305 | le32_to_cpu(stats->rmac_accepted_ucst_frms); 6306 tmp_stats[i++] = 6307 (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow) 6308 << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms); 6309 tmp_stats[i++] = 6310 (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 | 6311 le32_to_cpu(stats->rmac_discarded_frms); 6312 tmp_stats[i++] = 6313 (u64)le32_to_cpu(stats->rmac_drop_events_oflow) 6314 << 32 | le32_to_cpu(stats->rmac_drop_events); 6315 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets); 6316 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms); 6317 tmp_stats[i++] = 6318 (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 | 6319 le32_to_cpu(stats->rmac_usized_frms); 6320 tmp_stats[i++] = 6321 (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 | 6322 le32_to_cpu(stats->rmac_osized_frms); 6323 tmp_stats[i++] = 6324 (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 | 6325 le32_to_cpu(stats->rmac_frag_frms); 6326 tmp_stats[i++] = 6327 (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 | 6328 le32_to_cpu(stats->rmac_jabber_frms); 6329 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms); 6330 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms); 6331 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms); 6332 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms); 6333 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms); 6334 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms); 6335 tmp_stats[i++] = 6336 (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 | 6337 le32_to_cpu(stats->rmac_ip); 6338 tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets); 6339 tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip); 6340 tmp_stats[i++] = 6341 (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 | 6342 le32_to_cpu(stats->rmac_drop_ip); 6343 tmp_stats[i++] = 6344 (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 | 6345 le32_to_cpu(stats->rmac_icmp); 6346 tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp); 6347 tmp_stats[i++] = 6348 (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 | 6349 le32_to_cpu(stats->rmac_udp); 6350 tmp_stats[i++] = 6351 (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 | 6352 le32_to_cpu(stats->rmac_err_drp_udp); 6353 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym); 6354 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0); 6355 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1); 6356 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2); 6357 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3); 6358 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4); 6359 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5); 6360 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6); 6361 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7); 6362 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0); 6363 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1); 6364 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2); 6365 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3); 6366 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4); 6367 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5); 6368 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6); 6369 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7); 6370 tmp_stats[i++] = 6371 (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 | 6372 le32_to_cpu(stats->rmac_pause_cnt); 6373 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt); 6374 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt); 6375 tmp_stats[i++] = 6376 (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 | 6377 le32_to_cpu(stats->rmac_accepted_ip); 6378 tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp); 6379 tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt); 6380 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt); 6381 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt); 6382 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt); 6383 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt); 6384 tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt); 6385 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt); 6386 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt); 6387 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt); 6388 tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt); 6389 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt); 6390 tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt); 6391 tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt); 6392 tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt); 6393 tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt); 6394 tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt); 6395 tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt); 6396 tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt); 6397 6398 /* Enhanced statistics exist only for Hercules */ 6399 if (sp->device_type == XFRAME_II_DEVICE) { 6400 tmp_stats[i++] = 6401 le64_to_cpu(stats->rmac_ttl_1519_4095_frms); 6402 tmp_stats[i++] = 6403 le64_to_cpu(stats->rmac_ttl_4096_8191_frms); 6404 tmp_stats[i++] = 6405 le64_to_cpu(stats->rmac_ttl_8192_max_frms); 6406 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms); 6407 tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms); 6408 tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms); 6409 tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms); 6410 tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms); 6411 tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard); 6412 tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard); 6413 tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard); 6414 tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard); 6415 tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard); 6416 tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard); 6417 tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard); 6418 tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt); 6419 } 6420 6421 tmp_stats[i++] = 0; 6422 tmp_stats[i++] = swstats->single_ecc_errs; 6423 tmp_stats[i++] = swstats->double_ecc_errs; 6424 tmp_stats[i++] = swstats->parity_err_cnt; 6425 tmp_stats[i++] = swstats->serious_err_cnt; 6426 tmp_stats[i++] = swstats->soft_reset_cnt; 6427 tmp_stats[i++] = swstats->fifo_full_cnt; 6428 for (k = 0; k < MAX_RX_RINGS; k++) 6429 tmp_stats[i++] = swstats->ring_full_cnt[k]; 6430 tmp_stats[i++] = xstats->alarm_transceiver_temp_high; 6431 tmp_stats[i++] = xstats->alarm_transceiver_temp_low; 6432 tmp_stats[i++] = xstats->alarm_laser_bias_current_high; 6433 tmp_stats[i++] = xstats->alarm_laser_bias_current_low; 6434 tmp_stats[i++] = xstats->alarm_laser_output_power_high; 6435 tmp_stats[i++] = xstats->alarm_laser_output_power_low; 6436 tmp_stats[i++] = xstats->warn_transceiver_temp_high; 6437 tmp_stats[i++] = xstats->warn_transceiver_temp_low; 6438 tmp_stats[i++] = xstats->warn_laser_bias_current_high; 6439 tmp_stats[i++] = xstats->warn_laser_bias_current_low; 6440 tmp_stats[i++] = xstats->warn_laser_output_power_high; 6441 tmp_stats[i++] = xstats->warn_laser_output_power_low; 6442 tmp_stats[i++] = swstats->clubbed_frms_cnt; 6443 tmp_stats[i++] = swstats->sending_both; 6444 tmp_stats[i++] = swstats->outof_sequence_pkts; 6445 tmp_stats[i++] = swstats->flush_max_pkts; 6446 if (swstats->num_aggregations) { 6447 u64 tmp = swstats->sum_avg_pkts_aggregated; 6448 int count = 0; 6449 /* 6450 * Since 64-bit divide does not work on all platforms, 6451 * do repeated subtraction. 6452 */ 6453 while (tmp >= swstats->num_aggregations) { 6454 tmp -= swstats->num_aggregations; 6455 count++; 6456 } 6457 tmp_stats[i++] = count; 6458 } else 6459 tmp_stats[i++] = 0; 6460 tmp_stats[i++] = swstats->mem_alloc_fail_cnt; 6461 tmp_stats[i++] = swstats->pci_map_fail_cnt; 6462 tmp_stats[i++] = swstats->watchdog_timer_cnt; 6463 tmp_stats[i++] = swstats->mem_allocated; 6464 tmp_stats[i++] = swstats->mem_freed; 6465 tmp_stats[i++] = swstats->link_up_cnt; 6466 tmp_stats[i++] = swstats->link_down_cnt; 6467 tmp_stats[i++] = swstats->link_up_time; 6468 tmp_stats[i++] = swstats->link_down_time; 6469 6470 tmp_stats[i++] = swstats->tx_buf_abort_cnt; 6471 tmp_stats[i++] = swstats->tx_desc_abort_cnt; 6472 tmp_stats[i++] = swstats->tx_parity_err_cnt; 6473 tmp_stats[i++] = swstats->tx_link_loss_cnt; 6474 tmp_stats[i++] = swstats->tx_list_proc_err_cnt; 6475 6476 tmp_stats[i++] = swstats->rx_parity_err_cnt; 6477 tmp_stats[i++] = swstats->rx_abort_cnt; 6478 tmp_stats[i++] = swstats->rx_parity_abort_cnt; 6479 tmp_stats[i++] = swstats->rx_rda_fail_cnt; 6480 tmp_stats[i++] = swstats->rx_unkn_prot_cnt; 6481 tmp_stats[i++] = swstats->rx_fcs_err_cnt; 6482 tmp_stats[i++] = swstats->rx_buf_size_err_cnt; 6483 tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt; 6484 tmp_stats[i++] = swstats->rx_unkn_err_cnt; 6485 tmp_stats[i++] = swstats->tda_err_cnt; 6486 tmp_stats[i++] = swstats->pfc_err_cnt; 6487 tmp_stats[i++] = swstats->pcc_err_cnt; 6488 tmp_stats[i++] = swstats->tti_err_cnt; 6489 tmp_stats[i++] = swstats->tpa_err_cnt; 6490 tmp_stats[i++] = swstats->sm_err_cnt; 6491 tmp_stats[i++] = swstats->lso_err_cnt; 6492 tmp_stats[i++] = swstats->mac_tmac_err_cnt; 6493 tmp_stats[i++] = swstats->mac_rmac_err_cnt; 6494 tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt; 6495 tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt; 6496 tmp_stats[i++] = swstats->rc_err_cnt; 6497 tmp_stats[i++] = swstats->prc_pcix_err_cnt; 6498 tmp_stats[i++] = swstats->rpa_err_cnt; 6499 tmp_stats[i++] = swstats->rda_err_cnt; 6500 tmp_stats[i++] = swstats->rti_err_cnt; 6501 tmp_stats[i++] = swstats->mc_err_cnt; 6502 } 6503 6504 static int s2io_ethtool_get_regs_len(struct net_device *dev) 6505 { 6506 return XENA_REG_SPACE; 6507 } 6508 6509 6510 static int s2io_get_eeprom_len(struct net_device *dev) 6511 { 6512 return XENA_EEPROM_SPACE; 6513 } 6514 6515 static int s2io_get_sset_count(struct net_device *dev, int sset) 6516 { 6517 struct s2io_nic *sp = netdev_priv(dev); 6518 6519 switch (sset) { 6520 case ETH_SS_TEST: 6521 return S2IO_TEST_LEN; 6522 case ETH_SS_STATS: 6523 switch (sp->device_type) { 6524 case XFRAME_I_DEVICE: 6525 return XFRAME_I_STAT_LEN; 6526 case XFRAME_II_DEVICE: 6527 return XFRAME_II_STAT_LEN; 6528 default: 6529 return 0; 6530 } 6531 default: 6532 return -EOPNOTSUPP; 6533 } 6534 } 6535 6536 static void s2io_ethtool_get_strings(struct net_device *dev, 6537 u32 stringset, u8 *data) 6538 { 6539 int stat_size = 0; 6540 struct s2io_nic *sp = netdev_priv(dev); 6541 6542 switch (stringset) { 6543 case ETH_SS_TEST: 6544 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN); 6545 break; 6546 case ETH_SS_STATS: 6547 stat_size = sizeof(ethtool_xena_stats_keys); 6548 memcpy(data, ðtool_xena_stats_keys, stat_size); 6549 if (sp->device_type == XFRAME_II_DEVICE) { 6550 memcpy(data + stat_size, 6551 ðtool_enhanced_stats_keys, 6552 sizeof(ethtool_enhanced_stats_keys)); 6553 stat_size += sizeof(ethtool_enhanced_stats_keys); 6554 } 6555 6556 memcpy(data + stat_size, ðtool_driver_stats_keys, 6557 sizeof(ethtool_driver_stats_keys)); 6558 } 6559 } 6560 6561 static int s2io_set_features(struct net_device *dev, netdev_features_t features) 6562 { 6563 struct s2io_nic *sp = netdev_priv(dev); 6564 netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO; 6565 6566 if (changed && netif_running(dev)) { 6567 int rc; 6568 6569 s2io_stop_all_tx_queue(sp); 6570 s2io_card_down(sp); 6571 dev->features = features; 6572 rc = s2io_card_up(sp); 6573 if (rc) 6574 s2io_reset(sp); 6575 else 6576 s2io_start_all_tx_queue(sp); 6577 6578 return rc ? rc : 1; 6579 } 6580 6581 return 0; 6582 } 6583 6584 static const struct ethtool_ops netdev_ethtool_ops = { 6585 .get_drvinfo = s2io_ethtool_gdrvinfo, 6586 .get_regs_len = s2io_ethtool_get_regs_len, 6587 .get_regs = s2io_ethtool_gregs, 6588 .get_link = ethtool_op_get_link, 6589 .get_eeprom_len = s2io_get_eeprom_len, 6590 .get_eeprom = s2io_ethtool_geeprom, 6591 .set_eeprom = s2io_ethtool_seeprom, 6592 .get_ringparam = s2io_ethtool_gringparam, 6593 .get_pauseparam = s2io_ethtool_getpause_data, 6594 .set_pauseparam = s2io_ethtool_setpause_data, 6595 .self_test = s2io_ethtool_test, 6596 .get_strings = s2io_ethtool_get_strings, 6597 .set_phys_id = s2io_ethtool_set_led, 6598 .get_ethtool_stats = s2io_get_ethtool_stats, 6599 .get_sset_count = s2io_get_sset_count, 6600 .get_link_ksettings = s2io_ethtool_get_link_ksettings, 6601 .set_link_ksettings = s2io_ethtool_set_link_ksettings, 6602 }; 6603 6604 /** 6605 * s2io_ioctl - Entry point for the Ioctl 6606 * @dev : Device pointer. 6607 * @rq : An IOCTL specefic structure, that can contain a pointer to 6608 * a proprietary structure used to pass information to the driver. 6609 * @cmd : This is used to distinguish between the different commands that 6610 * can be passed to the IOCTL functions. 6611 * Description: 6612 * Currently there are no special functionality supported in IOCTL, hence 6613 * function always return EOPNOTSUPPORTED 6614 */ 6615 6616 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 6617 { 6618 return -EOPNOTSUPP; 6619 } 6620 6621 /** 6622 * s2io_change_mtu - entry point to change MTU size for the device. 6623 * @dev : device pointer. 6624 * @new_mtu : the new MTU size for the device. 6625 * Description: A driver entry point to change MTU size for the device. 6626 * Before changing the MTU the device must be stopped. 6627 * Return value: 6628 * 0 on success and an appropriate (-)ve integer as defined in errno.h 6629 * file on failure. 6630 */ 6631 6632 static int s2io_change_mtu(struct net_device *dev, int new_mtu) 6633 { 6634 struct s2io_nic *sp = netdev_priv(dev); 6635 int ret = 0; 6636 6637 dev->mtu = new_mtu; 6638 if (netif_running(dev)) { 6639 s2io_stop_all_tx_queue(sp); 6640 s2io_card_down(sp); 6641 ret = s2io_card_up(sp); 6642 if (ret) { 6643 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", 6644 __func__); 6645 return ret; 6646 } 6647 s2io_wake_all_tx_queue(sp); 6648 } else { /* Device is down */ 6649 struct XENA_dev_config __iomem *bar0 = sp->bar0; 6650 u64 val64 = new_mtu; 6651 6652 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); 6653 } 6654 6655 return ret; 6656 } 6657 6658 /** 6659 * s2io_set_link - Set the LInk status 6660 * @work: work struct containing a pointer to device private structue 6661 * Description: Sets the link status for the adapter 6662 */ 6663 6664 static void s2io_set_link(struct work_struct *work) 6665 { 6666 struct s2io_nic *nic = container_of(work, struct s2io_nic, 6667 set_link_task); 6668 struct net_device *dev = nic->dev; 6669 struct XENA_dev_config __iomem *bar0 = nic->bar0; 6670 register u64 val64; 6671 u16 subid; 6672 6673 rtnl_lock(); 6674 6675 if (!netif_running(dev)) 6676 goto out_unlock; 6677 6678 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) { 6679 /* The card is being reset, no point doing anything */ 6680 goto out_unlock; 6681 } 6682 6683 subid = nic->pdev->subsystem_device; 6684 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) { 6685 /* 6686 * Allow a small delay for the NICs self initiated 6687 * cleanup to complete. 6688 */ 6689 msleep(100); 6690 } 6691 6692 val64 = readq(&bar0->adapter_status); 6693 if (LINK_IS_UP(val64)) { 6694 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) { 6695 if (verify_xena_quiescence(nic)) { 6696 val64 = readq(&bar0->adapter_control); 6697 val64 |= ADAPTER_CNTL_EN; 6698 writeq(val64, &bar0->adapter_control); 6699 if (CARDS_WITH_FAULTY_LINK_INDICATORS( 6700 nic->device_type, subid)) { 6701 val64 = readq(&bar0->gpio_control); 6702 val64 |= GPIO_CTRL_GPIO_0; 6703 writeq(val64, &bar0->gpio_control); 6704 val64 = readq(&bar0->gpio_control); 6705 } else { 6706 val64 |= ADAPTER_LED_ON; 6707 writeq(val64, &bar0->adapter_control); 6708 } 6709 nic->device_enabled_once = true; 6710 } else { 6711 DBG_PRINT(ERR_DBG, 6712 "%s: Error: device is not Quiescent\n", 6713 dev->name); 6714 s2io_stop_all_tx_queue(nic); 6715 } 6716 } 6717 val64 = readq(&bar0->adapter_control); 6718 val64 |= ADAPTER_LED_ON; 6719 writeq(val64, &bar0->adapter_control); 6720 s2io_link(nic, LINK_UP); 6721 } else { 6722 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type, 6723 subid)) { 6724 val64 = readq(&bar0->gpio_control); 6725 val64 &= ~GPIO_CTRL_GPIO_0; 6726 writeq(val64, &bar0->gpio_control); 6727 val64 = readq(&bar0->gpio_control); 6728 } 6729 /* turn off LED */ 6730 val64 = readq(&bar0->adapter_control); 6731 val64 = val64 & (~ADAPTER_LED_ON); 6732 writeq(val64, &bar0->adapter_control); 6733 s2io_link(nic, LINK_DOWN); 6734 } 6735 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state)); 6736 6737 out_unlock: 6738 rtnl_unlock(); 6739 } 6740 6741 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, 6742 struct buffAdd *ba, 6743 struct sk_buff **skb, u64 *temp0, u64 *temp1, 6744 u64 *temp2, int size) 6745 { 6746 struct net_device *dev = sp->dev; 6747 struct swStat *stats = &sp->mac_control.stats_info->sw_stat; 6748 6749 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) { 6750 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp; 6751 /* allocate skb */ 6752 if (*skb) { 6753 DBG_PRINT(INFO_DBG, "SKB is not NULL\n"); 6754 /* 6755 * As Rx frame are not going to be processed, 6756 * using same mapped address for the Rxd 6757 * buffer pointer 6758 */ 6759 rxdp1->Buffer0_ptr = *temp0; 6760 } else { 6761 *skb = netdev_alloc_skb(dev, size); 6762 if (!(*skb)) { 6763 DBG_PRINT(INFO_DBG, 6764 "%s: Out of memory to allocate %s\n", 6765 dev->name, "1 buf mode SKBs"); 6766 stats->mem_alloc_fail_cnt++; 6767 return -ENOMEM ; 6768 } 6769 stats->mem_allocated += (*skb)->truesize; 6770 /* storing the mapped addr in a temp variable 6771 * such it will be used for next rxd whose 6772 * Host Control is NULL 6773 */ 6774 rxdp1->Buffer0_ptr = *temp0 = 6775 dma_map_single(&sp->pdev->dev, (*skb)->data, 6776 size - NET_IP_ALIGN, 6777 DMA_FROM_DEVICE); 6778 if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr)) 6779 goto memalloc_failed; 6780 rxdp->Host_Control = (unsigned long) (*skb); 6781 } 6782 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) { 6783 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp; 6784 /* Two buffer Mode */ 6785 if (*skb) { 6786 rxdp3->Buffer2_ptr = *temp2; 6787 rxdp3->Buffer0_ptr = *temp0; 6788 rxdp3->Buffer1_ptr = *temp1; 6789 } else { 6790 *skb = netdev_alloc_skb(dev, size); 6791 if (!(*skb)) { 6792 DBG_PRINT(INFO_DBG, 6793 "%s: Out of memory to allocate %s\n", 6794 dev->name, 6795 "2 buf mode SKBs"); 6796 stats->mem_alloc_fail_cnt++; 6797 return -ENOMEM; 6798 } 6799 stats->mem_allocated += (*skb)->truesize; 6800 rxdp3->Buffer2_ptr = *temp2 = 6801 dma_map_single(&sp->pdev->dev, (*skb)->data, 6802 dev->mtu + 4, DMA_FROM_DEVICE); 6803 if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr)) 6804 goto memalloc_failed; 6805 rxdp3->Buffer0_ptr = *temp0 = 6806 dma_map_single(&sp->pdev->dev, ba->ba_0, 6807 BUF0_LEN, DMA_FROM_DEVICE); 6808 if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) { 6809 dma_unmap_single(&sp->pdev->dev, 6810 (dma_addr_t)rxdp3->Buffer2_ptr, 6811 dev->mtu + 4, 6812 DMA_FROM_DEVICE); 6813 goto memalloc_failed; 6814 } 6815 rxdp->Host_Control = (unsigned long) (*skb); 6816 6817 /* Buffer-1 will be dummy buffer not used */ 6818 rxdp3->Buffer1_ptr = *temp1 = 6819 dma_map_single(&sp->pdev->dev, ba->ba_1, 6820 BUF1_LEN, DMA_FROM_DEVICE); 6821 if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) { 6822 dma_unmap_single(&sp->pdev->dev, 6823 (dma_addr_t)rxdp3->Buffer0_ptr, 6824 BUF0_LEN, DMA_FROM_DEVICE); 6825 dma_unmap_single(&sp->pdev->dev, 6826 (dma_addr_t)rxdp3->Buffer2_ptr, 6827 dev->mtu + 4, 6828 DMA_FROM_DEVICE); 6829 goto memalloc_failed; 6830 } 6831 } 6832 } 6833 return 0; 6834 6835 memalloc_failed: 6836 stats->pci_map_fail_cnt++; 6837 stats->mem_freed += (*skb)->truesize; 6838 dev_kfree_skb(*skb); 6839 return -ENOMEM; 6840 } 6841 6842 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp, 6843 int size) 6844 { 6845 struct net_device *dev = sp->dev; 6846 if (sp->rxd_mode == RXD_MODE_1) { 6847 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); 6848 } else if (sp->rxd_mode == RXD_MODE_3B) { 6849 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 6850 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); 6851 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4); 6852 } 6853 } 6854 6855 static int rxd_owner_bit_reset(struct s2io_nic *sp) 6856 { 6857 int i, j, k, blk_cnt = 0, size; 6858 struct config_param *config = &sp->config; 6859 struct mac_info *mac_control = &sp->mac_control; 6860 struct net_device *dev = sp->dev; 6861 struct RxD_t *rxdp = NULL; 6862 struct sk_buff *skb = NULL; 6863 struct buffAdd *ba = NULL; 6864 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0; 6865 6866 /* Calculate the size based on ring mode */ 6867 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + 6868 HEADER_802_2_SIZE + HEADER_SNAP_SIZE; 6869 if (sp->rxd_mode == RXD_MODE_1) 6870 size += NET_IP_ALIGN; 6871 else if (sp->rxd_mode == RXD_MODE_3B) 6872 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; 6873 6874 for (i = 0; i < config->rx_ring_num; i++) { 6875 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; 6876 struct ring_info *ring = &mac_control->rings[i]; 6877 6878 blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1); 6879 6880 for (j = 0; j < blk_cnt; j++) { 6881 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) { 6882 rxdp = ring->rx_blocks[j].rxds[k].virt_addr; 6883 if (sp->rxd_mode == RXD_MODE_3B) 6884 ba = &ring->ba[j][k]; 6885 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb, 6886 &temp0_64, 6887 &temp1_64, 6888 &temp2_64, 6889 size) == -ENOMEM) { 6890 return 0; 6891 } 6892 6893 set_rxd_buffer_size(sp, rxdp, size); 6894 dma_wmb(); 6895 /* flip the Ownership bit to Hardware */ 6896 rxdp->Control_1 |= RXD_OWN_XENA; 6897 } 6898 } 6899 } 6900 return 0; 6901 6902 } 6903 6904 static int s2io_add_isr(struct s2io_nic *sp) 6905 { 6906 int ret = 0; 6907 struct net_device *dev = sp->dev; 6908 int err = 0; 6909 6910 if (sp->config.intr_type == MSI_X) 6911 ret = s2io_enable_msi_x(sp); 6912 if (ret) { 6913 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name); 6914 sp->config.intr_type = INTA; 6915 } 6916 6917 /* 6918 * Store the values of the MSIX table in 6919 * the struct s2io_nic structure 6920 */ 6921 store_xmsi_data(sp); 6922 6923 /* After proper initialization of H/W, register ISR */ 6924 if (sp->config.intr_type == MSI_X) { 6925 int i, msix_rx_cnt = 0; 6926 6927 for (i = 0; i < sp->num_entries; i++) { 6928 if (sp->s2io_entries[i].in_use == MSIX_FLG) { 6929 if (sp->s2io_entries[i].type == 6930 MSIX_RING_TYPE) { 6931 snprintf(sp->desc[i], 6932 sizeof(sp->desc[i]), 6933 "%s:MSI-X-%d-RX", 6934 dev->name, i); 6935 err = request_irq(sp->entries[i].vector, 6936 s2io_msix_ring_handle, 6937 0, 6938 sp->desc[i], 6939 sp->s2io_entries[i].arg); 6940 } else if (sp->s2io_entries[i].type == 6941 MSIX_ALARM_TYPE) { 6942 snprintf(sp->desc[i], 6943 sizeof(sp->desc[i]), 6944 "%s:MSI-X-%d-TX", 6945 dev->name, i); 6946 err = request_irq(sp->entries[i].vector, 6947 s2io_msix_fifo_handle, 6948 0, 6949 sp->desc[i], 6950 sp->s2io_entries[i].arg); 6951 6952 } 6953 /* if either data or addr is zero print it. */ 6954 if (!(sp->msix_info[i].addr && 6955 sp->msix_info[i].data)) { 6956 DBG_PRINT(ERR_DBG, 6957 "%s @Addr:0x%llx Data:0x%llx\n", 6958 sp->desc[i], 6959 (unsigned long long) 6960 sp->msix_info[i].addr, 6961 (unsigned long long) 6962 ntohl(sp->msix_info[i].data)); 6963 } else 6964 msix_rx_cnt++; 6965 if (err) { 6966 remove_msix_isr(sp); 6967 6968 DBG_PRINT(ERR_DBG, 6969 "%s:MSI-X-%d registration " 6970 "failed\n", dev->name, i); 6971 6972 DBG_PRINT(ERR_DBG, 6973 "%s: Defaulting to INTA\n", 6974 dev->name); 6975 sp->config.intr_type = INTA; 6976 break; 6977 } 6978 sp->s2io_entries[i].in_use = 6979 MSIX_REGISTERED_SUCCESS; 6980 } 6981 } 6982 if (!err) { 6983 pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt); 6984 DBG_PRINT(INFO_DBG, 6985 "MSI-X-TX entries enabled through alarm vector\n"); 6986 } 6987 } 6988 if (sp->config.intr_type == INTA) { 6989 err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED, 6990 sp->name, dev); 6991 if (err) { 6992 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", 6993 dev->name); 6994 return -1; 6995 } 6996 } 6997 return 0; 6998 } 6999 7000 static void s2io_rem_isr(struct s2io_nic *sp) 7001 { 7002 if (sp->config.intr_type == MSI_X) 7003 remove_msix_isr(sp); 7004 else 7005 remove_inta_isr(sp); 7006 } 7007 7008 static void do_s2io_card_down(struct s2io_nic *sp, int do_io) 7009 { 7010 int cnt = 0; 7011 struct XENA_dev_config __iomem *bar0 = sp->bar0; 7012 register u64 val64 = 0; 7013 struct config_param *config; 7014 config = &sp->config; 7015 7016 if (!is_s2io_card_up(sp)) 7017 return; 7018 7019 del_timer_sync(&sp->alarm_timer); 7020 /* If s2io_set_link task is executing, wait till it completes. */ 7021 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) 7022 msleep(50); 7023 clear_bit(__S2IO_STATE_CARD_UP, &sp->state); 7024 7025 /* Disable napi */ 7026 if (sp->config.napi) { 7027 int off = 0; 7028 if (config->intr_type == MSI_X) { 7029 for (; off < sp->config.rx_ring_num; off++) 7030 napi_disable(&sp->mac_control.rings[off].napi); 7031 } 7032 else 7033 napi_disable(&sp->napi); 7034 } 7035 7036 /* disable Tx and Rx traffic on the NIC */ 7037 if (do_io) 7038 stop_nic(sp); 7039 7040 s2io_rem_isr(sp); 7041 7042 /* stop the tx queue, indicate link down */ 7043 s2io_link(sp, LINK_DOWN); 7044 7045 /* Check if the device is Quiescent and then Reset the NIC */ 7046 while (do_io) { 7047 /* As per the HW requirement we need to replenish the 7048 * receive buffer to avoid the ring bump. Since there is 7049 * no intention of processing the Rx frame at this pointwe are 7050 * just setting the ownership bit of rxd in Each Rx 7051 * ring to HW and set the appropriate buffer size 7052 * based on the ring mode 7053 */ 7054 rxd_owner_bit_reset(sp); 7055 7056 val64 = readq(&bar0->adapter_status); 7057 if (verify_xena_quiescence(sp)) { 7058 if (verify_pcc_quiescent(sp, sp->device_enabled_once)) 7059 break; 7060 } 7061 7062 msleep(50); 7063 cnt++; 7064 if (cnt == 10) { 7065 DBG_PRINT(ERR_DBG, "Device not Quiescent - " 7066 "adapter status reads 0x%llx\n", 7067 (unsigned long long)val64); 7068 break; 7069 } 7070 } 7071 if (do_io) 7072 s2io_reset(sp); 7073 7074 /* Free all Tx buffers */ 7075 free_tx_buffers(sp); 7076 7077 /* Free all Rx buffers */ 7078 free_rx_buffers(sp); 7079 7080 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state)); 7081 } 7082 7083 static void s2io_card_down(struct s2io_nic *sp) 7084 { 7085 do_s2io_card_down(sp, 1); 7086 } 7087 7088 static int s2io_card_up(struct s2io_nic *sp) 7089 { 7090 int i, ret = 0; 7091 struct config_param *config; 7092 struct mac_info *mac_control; 7093 struct net_device *dev = sp->dev; 7094 u16 interruptible; 7095 7096 /* Initialize the H/W I/O registers */ 7097 ret = init_nic(sp); 7098 if (ret != 0) { 7099 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", 7100 dev->name); 7101 if (ret != -EIO) 7102 s2io_reset(sp); 7103 return ret; 7104 } 7105 7106 /* 7107 * Initializing the Rx buffers. For now we are considering only 1 7108 * Rx ring and initializing buffers into 30 Rx blocks 7109 */ 7110 config = &sp->config; 7111 mac_control = &sp->mac_control; 7112 7113 for (i = 0; i < config->rx_ring_num; i++) { 7114 struct ring_info *ring = &mac_control->rings[i]; 7115 7116 ring->mtu = dev->mtu; 7117 ring->lro = !!(dev->features & NETIF_F_LRO); 7118 ret = fill_rx_buffers(sp, ring, 1); 7119 if (ret) { 7120 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", 7121 dev->name); 7122 s2io_reset(sp); 7123 free_rx_buffers(sp); 7124 return -ENOMEM; 7125 } 7126 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i, 7127 ring->rx_bufs_left); 7128 } 7129 7130 /* Initialise napi */ 7131 if (config->napi) { 7132 if (config->intr_type == MSI_X) { 7133 for (i = 0; i < sp->config.rx_ring_num; i++) 7134 napi_enable(&sp->mac_control.rings[i].napi); 7135 } else { 7136 napi_enable(&sp->napi); 7137 } 7138 } 7139 7140 /* Maintain the state prior to the open */ 7141 if (sp->promisc_flg) 7142 sp->promisc_flg = 0; 7143 if (sp->m_cast_flg) { 7144 sp->m_cast_flg = 0; 7145 sp->all_multi_pos = 0; 7146 } 7147 7148 /* Setting its receive mode */ 7149 s2io_set_multicast(dev, true); 7150 7151 if (dev->features & NETIF_F_LRO) { 7152 /* Initialize max aggregatable pkts per session based on MTU */ 7153 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; 7154 /* Check if we can use (if specified) user provided value */ 7155 if (lro_max_pkts < sp->lro_max_aggr_per_sess) 7156 sp->lro_max_aggr_per_sess = lro_max_pkts; 7157 } 7158 7159 /* Enable Rx Traffic and interrupts on the NIC */ 7160 if (start_nic(sp)) { 7161 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name); 7162 s2io_reset(sp); 7163 free_rx_buffers(sp); 7164 return -ENODEV; 7165 } 7166 7167 /* Add interrupt service routine */ 7168 if (s2io_add_isr(sp) != 0) { 7169 if (sp->config.intr_type == MSI_X) 7170 s2io_rem_isr(sp); 7171 s2io_reset(sp); 7172 free_rx_buffers(sp); 7173 return -ENODEV; 7174 } 7175 7176 timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0); 7177 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 7178 7179 set_bit(__S2IO_STATE_CARD_UP, &sp->state); 7180 7181 /* Enable select interrupts */ 7182 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); 7183 if (sp->config.intr_type != INTA) { 7184 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR; 7185 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS); 7186 } else { 7187 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; 7188 interruptible |= TX_PIC_INTR; 7189 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS); 7190 } 7191 7192 return 0; 7193 } 7194 7195 /** 7196 * s2io_restart_nic - Resets the NIC. 7197 * @work : work struct containing a pointer to the device private structure 7198 * Description: 7199 * This function is scheduled to be run by the s2io_tx_watchdog 7200 * function after 0.5 secs to reset the NIC. The idea is to reduce 7201 * the run time of the watch dog routine which is run holding a 7202 * spin lock. 7203 */ 7204 7205 static void s2io_restart_nic(struct work_struct *work) 7206 { 7207 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task); 7208 struct net_device *dev = sp->dev; 7209 7210 rtnl_lock(); 7211 7212 if (!netif_running(dev)) 7213 goto out_unlock; 7214 7215 s2io_card_down(sp); 7216 if (s2io_card_up(sp)) { 7217 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name); 7218 } 7219 s2io_wake_all_tx_queue(sp); 7220 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name); 7221 out_unlock: 7222 rtnl_unlock(); 7223 } 7224 7225 /** 7226 * s2io_tx_watchdog - Watchdog for transmit side. 7227 * @dev : Pointer to net device structure 7228 * @txqueue: index of the hanging queue 7229 * Description: 7230 * This function is triggered if the Tx Queue is stopped 7231 * for a pre-defined amount of time when the Interface is still up. 7232 * If the Interface is jammed in such a situation, the hardware is 7233 * reset (by s2io_close) and restarted again (by s2io_open) to 7234 * overcome any problem that might have been caused in the hardware. 7235 * Return value: 7236 * void 7237 */ 7238 7239 static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue) 7240 { 7241 struct s2io_nic *sp = netdev_priv(dev); 7242 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; 7243 7244 if (netif_carrier_ok(dev)) { 7245 swstats->watchdog_timer_cnt++; 7246 schedule_work(&sp->rst_timer_task); 7247 swstats->soft_reset_cnt++; 7248 } 7249 } 7250 7251 /** 7252 * rx_osm_handler - To perform some OS related operations on SKB. 7253 * @ring_data : the ring from which this RxD was extracted. 7254 * @rxdp: descriptor 7255 * Description: 7256 * This function is called by the Rx interrupt serivce routine to perform 7257 * some OS related operations on the SKB before passing it to the upper 7258 * layers. It mainly checks if the checksum is OK, if so adds it to the 7259 * SKBs cksum variable, increments the Rx packet count and passes the SKB 7260 * to the upper layer. If the checksum is wrong, it increments the Rx 7261 * packet error count, frees the SKB and returns error. 7262 * Return value: 7263 * SUCCESS on success and -1 on failure. 7264 */ 7265 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) 7266 { 7267 struct s2io_nic *sp = ring_data->nic; 7268 struct net_device *dev = ring_data->dev; 7269 struct sk_buff *skb = (struct sk_buff *) 7270 ((unsigned long)rxdp->Host_Control); 7271 int ring_no = ring_data->ring_no; 7272 u16 l3_csum, l4_csum; 7273 unsigned long long err = rxdp->Control_1 & RXD_T_CODE; 7274 struct lro *lro; 7275 u8 err_mask; 7276 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; 7277 7278 skb->dev = dev; 7279 7280 if (err) { 7281 /* Check for parity error */ 7282 if (err & 0x1) 7283 swstats->parity_err_cnt++; 7284 7285 err_mask = err >> 48; 7286 switch (err_mask) { 7287 case 1: 7288 swstats->rx_parity_err_cnt++; 7289 break; 7290 7291 case 2: 7292 swstats->rx_abort_cnt++; 7293 break; 7294 7295 case 3: 7296 swstats->rx_parity_abort_cnt++; 7297 break; 7298 7299 case 4: 7300 swstats->rx_rda_fail_cnt++; 7301 break; 7302 7303 case 5: 7304 swstats->rx_unkn_prot_cnt++; 7305 break; 7306 7307 case 6: 7308 swstats->rx_fcs_err_cnt++; 7309 break; 7310 7311 case 7: 7312 swstats->rx_buf_size_err_cnt++; 7313 break; 7314 7315 case 8: 7316 swstats->rx_rxd_corrupt_cnt++; 7317 break; 7318 7319 case 15: 7320 swstats->rx_unkn_err_cnt++; 7321 break; 7322 } 7323 /* 7324 * Drop the packet if bad transfer code. Exception being 7325 * 0x5, which could be due to unsupported IPv6 extension header. 7326 * In this case, we let stack handle the packet. 7327 * Note that in this case, since checksum will be incorrect, 7328 * stack will validate the same. 7329 */ 7330 if (err_mask != 0x5) { 7331 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n", 7332 dev->name, err_mask); 7333 dev->stats.rx_crc_errors++; 7334 swstats->mem_freed 7335 += skb->truesize; 7336 dev_kfree_skb(skb); 7337 ring_data->rx_bufs_left -= 1; 7338 rxdp->Host_Control = 0; 7339 return 0; 7340 } 7341 } 7342 7343 rxdp->Host_Control = 0; 7344 if (sp->rxd_mode == RXD_MODE_1) { 7345 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); 7346 7347 skb_put(skb, len); 7348 } else if (sp->rxd_mode == RXD_MODE_3B) { 7349 int get_block = ring_data->rx_curr_get_info.block_index; 7350 int get_off = ring_data->rx_curr_get_info.offset; 7351 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2); 7352 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2); 7353 unsigned char *buff = skb_push(skb, buf0_len); 7354 7355 struct buffAdd *ba = &ring_data->ba[get_block][get_off]; 7356 memcpy(buff, ba->ba_0, buf0_len); 7357 skb_put(skb, buf2_len); 7358 } 7359 7360 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && 7361 ((!ring_data->lro) || 7362 (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) && 7363 (dev->features & NETIF_F_RXCSUM)) { 7364 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); 7365 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); 7366 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) { 7367 /* 7368 * NIC verifies if the Checksum of the received 7369 * frame is Ok or not and accordingly returns 7370 * a flag in the RxD. 7371 */ 7372 skb->ip_summed = CHECKSUM_UNNECESSARY; 7373 if (ring_data->lro) { 7374 u32 tcp_len = 0; 7375 u8 *tcp; 7376 int ret = 0; 7377 7378 ret = s2io_club_tcp_session(ring_data, 7379 skb->data, &tcp, 7380 &tcp_len, &lro, 7381 rxdp, sp); 7382 switch (ret) { 7383 case 3: /* Begin anew */ 7384 lro->parent = skb; 7385 goto aggregate; 7386 case 1: /* Aggregate */ 7387 lro_append_pkt(sp, lro, skb, tcp_len); 7388 goto aggregate; 7389 case 4: /* Flush session */ 7390 lro_append_pkt(sp, lro, skb, tcp_len); 7391 queue_rx_frame(lro->parent, 7392 lro->vlan_tag); 7393 clear_lro_session(lro); 7394 swstats->flush_max_pkts++; 7395 goto aggregate; 7396 case 2: /* Flush both */ 7397 lro->parent->data_len = lro->frags_len; 7398 swstats->sending_both++; 7399 queue_rx_frame(lro->parent, 7400 lro->vlan_tag); 7401 clear_lro_session(lro); 7402 goto send_up; 7403 case 0: /* sessions exceeded */ 7404 case -1: /* non-TCP or not L2 aggregatable */ 7405 case 5: /* 7406 * First pkt in session not 7407 * L3/L4 aggregatable 7408 */ 7409 break; 7410 default: 7411 DBG_PRINT(ERR_DBG, 7412 "%s: Samadhana!!\n", 7413 __func__); 7414 BUG(); 7415 } 7416 } 7417 } else { 7418 /* 7419 * Packet with erroneous checksum, let the 7420 * upper layers deal with it. 7421 */ 7422 skb_checksum_none_assert(skb); 7423 } 7424 } else 7425 skb_checksum_none_assert(skb); 7426 7427 swstats->mem_freed += skb->truesize; 7428 send_up: 7429 skb_record_rx_queue(skb, ring_no); 7430 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2)); 7431 aggregate: 7432 sp->mac_control.rings[ring_no].rx_bufs_left -= 1; 7433 return SUCCESS; 7434 } 7435 7436 /** 7437 * s2io_link - stops/starts the Tx queue. 7438 * @sp : private member of the device structure, which is a pointer to the 7439 * s2io_nic structure. 7440 * @link : inidicates whether link is UP/DOWN. 7441 * Description: 7442 * This function stops/starts the Tx queue depending on whether the link 7443 * status of the NIC is is down or up. This is called by the Alarm 7444 * interrupt handler whenever a link change interrupt comes up. 7445 * Return value: 7446 * void. 7447 */ 7448 7449 static void s2io_link(struct s2io_nic *sp, int link) 7450 { 7451 struct net_device *dev = sp->dev; 7452 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; 7453 7454 if (link != sp->last_link_state) { 7455 init_tti(sp, link, false); 7456 if (link == LINK_DOWN) { 7457 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name); 7458 s2io_stop_all_tx_queue(sp); 7459 netif_carrier_off(dev); 7460 if (swstats->link_up_cnt) 7461 swstats->link_up_time = 7462 jiffies - sp->start_time; 7463 swstats->link_down_cnt++; 7464 } else { 7465 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name); 7466 if (swstats->link_down_cnt) 7467 swstats->link_down_time = 7468 jiffies - sp->start_time; 7469 swstats->link_up_cnt++; 7470 netif_carrier_on(dev); 7471 s2io_wake_all_tx_queue(sp); 7472 } 7473 } 7474 sp->last_link_state = link; 7475 sp->start_time = jiffies; 7476 } 7477 7478 /** 7479 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers . 7480 * @sp : private member of the device structure, which is a pointer to the 7481 * s2io_nic structure. 7482 * Description: 7483 * This function initializes a few of the PCI and PCI-X configuration registers 7484 * with recommended values. 7485 * Return value: 7486 * void 7487 */ 7488 7489 static void s2io_init_pci(struct s2io_nic *sp) 7490 { 7491 u16 pci_cmd = 0, pcix_cmd = 0; 7492 7493 /* Enable Data Parity Error Recovery in PCI-X command register. */ 7494 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 7495 &(pcix_cmd)); 7496 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 7497 (pcix_cmd | 1)); 7498 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 7499 &(pcix_cmd)); 7500 7501 /* Set the PErr Response bit in PCI command register. */ 7502 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); 7503 pci_write_config_word(sp->pdev, PCI_COMMAND, 7504 (pci_cmd | PCI_COMMAND_PARITY)); 7505 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); 7506 } 7507 7508 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type, 7509 u8 *dev_multiq) 7510 { 7511 int i; 7512 7513 if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) { 7514 DBG_PRINT(ERR_DBG, "Requested number of tx fifos " 7515 "(%d) not supported\n", tx_fifo_num); 7516 7517 if (tx_fifo_num < 1) 7518 tx_fifo_num = 1; 7519 else 7520 tx_fifo_num = MAX_TX_FIFOS; 7521 7522 DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num); 7523 } 7524 7525 if (multiq) 7526 *dev_multiq = multiq; 7527 7528 if (tx_steering_type && (1 == tx_fifo_num)) { 7529 if (tx_steering_type != TX_DEFAULT_STEERING) 7530 DBG_PRINT(ERR_DBG, 7531 "Tx steering is not supported with " 7532 "one fifo. Disabling Tx steering.\n"); 7533 tx_steering_type = NO_STEERING; 7534 } 7535 7536 if ((tx_steering_type < NO_STEERING) || 7537 (tx_steering_type > TX_DEFAULT_STEERING)) { 7538 DBG_PRINT(ERR_DBG, 7539 "Requested transmit steering not supported\n"); 7540 DBG_PRINT(ERR_DBG, "Disabling transmit steering\n"); 7541 tx_steering_type = NO_STEERING; 7542 } 7543 7544 if (rx_ring_num > MAX_RX_RINGS) { 7545 DBG_PRINT(ERR_DBG, 7546 "Requested number of rx rings not supported\n"); 7547 DBG_PRINT(ERR_DBG, "Default to %d rx rings\n", 7548 MAX_RX_RINGS); 7549 rx_ring_num = MAX_RX_RINGS; 7550 } 7551 7552 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { 7553 DBG_PRINT(ERR_DBG, "Wrong intr_type requested. " 7554 "Defaulting to INTA\n"); 7555 *dev_intr_type = INTA; 7556 } 7557 7558 if ((*dev_intr_type == MSI_X) && 7559 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) && 7560 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) { 7561 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. " 7562 "Defaulting to INTA\n"); 7563 *dev_intr_type = INTA; 7564 } 7565 7566 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) { 7567 DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n"); 7568 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n"); 7569 rx_ring_mode = 1; 7570 } 7571 7572 for (i = 0; i < MAX_RX_RINGS; i++) 7573 if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) { 7574 DBG_PRINT(ERR_DBG, "Requested rx ring size not " 7575 "supported\nDefaulting to %d\n", 7576 MAX_RX_BLOCKS_PER_RING); 7577 rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING; 7578 } 7579 7580 return SUCCESS; 7581 } 7582 7583 /** 7584 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS or Traffic class respectively. 7585 * @nic: device private variable 7586 * @ds_codepoint: data 7587 * @ring: ring index 7588 * Description: The function configures the receive steering to 7589 * desired receive ring. 7590 * Return Value: SUCCESS on success and 7591 * '-1' on failure (endian settings incorrect). 7592 */ 7593 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring) 7594 { 7595 struct XENA_dev_config __iomem *bar0 = nic->bar0; 7596 register u64 val64 = 0; 7597 7598 if (ds_codepoint > 63) 7599 return FAILURE; 7600 7601 val64 = RTS_DS_MEM_DATA(ring); 7602 writeq(val64, &bar0->rts_ds_mem_data); 7603 7604 val64 = RTS_DS_MEM_CTRL_WE | 7605 RTS_DS_MEM_CTRL_STROBE_NEW_CMD | 7606 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint); 7607 7608 writeq(val64, &bar0->rts_ds_mem_ctrl); 7609 7610 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl, 7611 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED, 7612 S2IO_BIT_RESET, true); 7613 } 7614 7615 static const struct net_device_ops s2io_netdev_ops = { 7616 .ndo_open = s2io_open, 7617 .ndo_stop = s2io_close, 7618 .ndo_get_stats = s2io_get_stats, 7619 .ndo_start_xmit = s2io_xmit, 7620 .ndo_validate_addr = eth_validate_addr, 7621 .ndo_set_rx_mode = s2io_ndo_set_multicast, 7622 .ndo_do_ioctl = s2io_ioctl, 7623 .ndo_set_mac_address = s2io_set_mac_addr, 7624 .ndo_change_mtu = s2io_change_mtu, 7625 .ndo_set_features = s2io_set_features, 7626 .ndo_tx_timeout = s2io_tx_watchdog, 7627 #ifdef CONFIG_NET_POLL_CONTROLLER 7628 .ndo_poll_controller = s2io_netpoll, 7629 #endif 7630 }; 7631 7632 /** 7633 * s2io_init_nic - Initialization of the adapter . 7634 * @pdev : structure containing the PCI related information of the device. 7635 * @pre: List of PCI devices supported by the driver listed in s2io_tbl. 7636 * Description: 7637 * The function initializes an adapter identified by the pci_dec structure. 7638 * All OS related initialization including memory and device structure and 7639 * initlaization of the device private variable is done. Also the swapper 7640 * control register is initialized to enable read and write into the I/O 7641 * registers of the device. 7642 * Return value: 7643 * returns 0 on success and negative on failure. 7644 */ 7645 7646 static int 7647 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) 7648 { 7649 struct s2io_nic *sp; 7650 struct net_device *dev; 7651 int i, j, ret; 7652 int dma_flag = false; 7653 u32 mac_up, mac_down; 7654 u64 val64 = 0, tmp64 = 0; 7655 struct XENA_dev_config __iomem *bar0 = NULL; 7656 u16 subid; 7657 struct config_param *config; 7658 struct mac_info *mac_control; 7659 int mode; 7660 u8 dev_intr_type = intr_type; 7661 u8 dev_multiq = 0; 7662 7663 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq); 7664 if (ret) 7665 return ret; 7666 7667 ret = pci_enable_device(pdev); 7668 if (ret) { 7669 DBG_PRINT(ERR_DBG, 7670 "%s: pci_enable_device failed\n", __func__); 7671 return ret; 7672 } 7673 7674 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 7675 DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__); 7676 dma_flag = true; 7677 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { 7678 DBG_PRINT(ERR_DBG, 7679 "Unable to obtain 64bit DMA for coherent allocations\n"); 7680 pci_disable_device(pdev); 7681 return -ENOMEM; 7682 } 7683 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { 7684 DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__); 7685 } else { 7686 pci_disable_device(pdev); 7687 return -ENOMEM; 7688 } 7689 ret = pci_request_regions(pdev, s2io_driver_name); 7690 if (ret) { 7691 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n", 7692 __func__, ret); 7693 pci_disable_device(pdev); 7694 return -ENODEV; 7695 } 7696 if (dev_multiq) 7697 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num); 7698 else 7699 dev = alloc_etherdev(sizeof(struct s2io_nic)); 7700 if (dev == NULL) { 7701 pci_disable_device(pdev); 7702 pci_release_regions(pdev); 7703 return -ENODEV; 7704 } 7705 7706 pci_set_master(pdev); 7707 pci_set_drvdata(pdev, dev); 7708 SET_NETDEV_DEV(dev, &pdev->dev); 7709 7710 /* Private member variable initialized to s2io NIC structure */ 7711 sp = netdev_priv(dev); 7712 sp->dev = dev; 7713 sp->pdev = pdev; 7714 sp->high_dma_flag = dma_flag; 7715 sp->device_enabled_once = false; 7716 if (rx_ring_mode == 1) 7717 sp->rxd_mode = RXD_MODE_1; 7718 if (rx_ring_mode == 2) 7719 sp->rxd_mode = RXD_MODE_3B; 7720 7721 sp->config.intr_type = dev_intr_type; 7722 7723 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) || 7724 (pdev->device == PCI_DEVICE_ID_HERC_UNI)) 7725 sp->device_type = XFRAME_II_DEVICE; 7726 else 7727 sp->device_type = XFRAME_I_DEVICE; 7728 7729 7730 /* Initialize some PCI/PCI-X fields of the NIC. */ 7731 s2io_init_pci(sp); 7732 7733 /* 7734 * Setting the device configuration parameters. 7735 * Most of these parameters can be specified by the user during 7736 * module insertion as they are module loadable parameters. If 7737 * these parameters are not not specified during load time, they 7738 * are initialized with default values. 7739 */ 7740 config = &sp->config; 7741 mac_control = &sp->mac_control; 7742 7743 config->napi = napi; 7744 config->tx_steering_type = tx_steering_type; 7745 7746 /* Tx side parameters. */ 7747 if (config->tx_steering_type == TX_PRIORITY_STEERING) 7748 config->tx_fifo_num = MAX_TX_FIFOS; 7749 else 7750 config->tx_fifo_num = tx_fifo_num; 7751 7752 /* Initialize the fifos used for tx steering */ 7753 if (config->tx_fifo_num < 5) { 7754 if (config->tx_fifo_num == 1) 7755 sp->total_tcp_fifos = 1; 7756 else 7757 sp->total_tcp_fifos = config->tx_fifo_num - 1; 7758 sp->udp_fifo_idx = config->tx_fifo_num - 1; 7759 sp->total_udp_fifos = 1; 7760 sp->other_fifo_idx = sp->total_tcp_fifos - 1; 7761 } else { 7762 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM - 7763 FIFO_OTHER_MAX_NUM); 7764 sp->udp_fifo_idx = sp->total_tcp_fifos; 7765 sp->total_udp_fifos = FIFO_UDP_MAX_NUM; 7766 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM; 7767 } 7768 7769 config->multiq = dev_multiq; 7770 for (i = 0; i < config->tx_fifo_num; i++) { 7771 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; 7772 7773 tx_cfg->fifo_len = tx_fifo_len[i]; 7774 tx_cfg->fifo_priority = i; 7775 } 7776 7777 /* mapping the QoS priority to the configured fifos */ 7778 for (i = 0; i < MAX_TX_FIFOS; i++) 7779 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i]; 7780 7781 /* map the hashing selector table to the configured fifos */ 7782 for (i = 0; i < config->tx_fifo_num; i++) 7783 sp->fifo_selector[i] = fifo_selector[i]; 7784 7785 7786 config->tx_intr_type = TXD_INT_TYPE_UTILZ; 7787 for (i = 0; i < config->tx_fifo_num; i++) { 7788 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; 7789 7790 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER); 7791 if (tx_cfg->fifo_len < 65) { 7792 config->tx_intr_type = TXD_INT_TYPE_PER_LIST; 7793 break; 7794 } 7795 } 7796 /* + 2 because one Txd for skb->data and one Txd for UFO */ 7797 config->max_txds = MAX_SKB_FRAGS + 2; 7798 7799 /* Rx side parameters. */ 7800 config->rx_ring_num = rx_ring_num; 7801 for (i = 0; i < config->rx_ring_num; i++) { 7802 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; 7803 struct ring_info *ring = &mac_control->rings[i]; 7804 7805 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1); 7806 rx_cfg->ring_priority = i; 7807 ring->rx_bufs_left = 0; 7808 ring->rxd_mode = sp->rxd_mode; 7809 ring->rxd_count = rxd_count[sp->rxd_mode]; 7810 ring->pdev = sp->pdev; 7811 ring->dev = sp->dev; 7812 } 7813 7814 for (i = 0; i < rx_ring_num; i++) { 7815 struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; 7816 7817 rx_cfg->ring_org = RING_ORG_BUFF1; 7818 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER); 7819 } 7820 7821 /* Setting Mac Control parameters */ 7822 mac_control->rmac_pause_time = rmac_pause_time; 7823 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3; 7824 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7; 7825 7826 7827 /* initialize the shared memory used by the NIC and the host */ 7828 if (init_shared_mem(sp)) { 7829 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name); 7830 ret = -ENOMEM; 7831 goto mem_alloc_failed; 7832 } 7833 7834 sp->bar0 = pci_ioremap_bar(pdev, 0); 7835 if (!sp->bar0) { 7836 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n", 7837 dev->name); 7838 ret = -ENOMEM; 7839 goto bar0_remap_failed; 7840 } 7841 7842 sp->bar1 = pci_ioremap_bar(pdev, 2); 7843 if (!sp->bar1) { 7844 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n", 7845 dev->name); 7846 ret = -ENOMEM; 7847 goto bar1_remap_failed; 7848 } 7849 7850 /* Initializing the BAR1 address as the start of the FIFO pointer. */ 7851 for (j = 0; j < MAX_TX_FIFOS; j++) { 7852 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000); 7853 } 7854 7855 /* Driver entry points */ 7856 dev->netdev_ops = &s2io_netdev_ops; 7857 dev->ethtool_ops = &netdev_ethtool_ops; 7858 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | 7859 NETIF_F_TSO | NETIF_F_TSO6 | 7860 NETIF_F_RXCSUM | NETIF_F_LRO; 7861 dev->features |= dev->hw_features | 7862 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; 7863 if (sp->high_dma_flag == true) 7864 dev->features |= NETIF_F_HIGHDMA; 7865 dev->watchdog_timeo = WATCH_DOG_TIMEOUT; 7866 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic); 7867 INIT_WORK(&sp->set_link_task, s2io_set_link); 7868 7869 pci_save_state(sp->pdev); 7870 7871 /* Setting swapper control on the NIC, for proper reset operation */ 7872 if (s2io_set_swapper(sp)) { 7873 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n", 7874 dev->name); 7875 ret = -EAGAIN; 7876 goto set_swap_failed; 7877 } 7878 7879 /* Verify if the Herc works on the slot its placed into */ 7880 if (sp->device_type & XFRAME_II_DEVICE) { 7881 mode = s2io_verify_pci_mode(sp); 7882 if (mode < 0) { 7883 DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n", 7884 __func__); 7885 ret = -EBADSLT; 7886 goto set_swap_failed; 7887 } 7888 } 7889 7890 if (sp->config.intr_type == MSI_X) { 7891 sp->num_entries = config->rx_ring_num + 1; 7892 ret = s2io_enable_msi_x(sp); 7893 7894 if (!ret) { 7895 ret = s2io_test_msi(sp); 7896 /* rollback MSI-X, will re-enable during add_isr() */ 7897 remove_msix_isr(sp); 7898 } 7899 if (ret) { 7900 7901 DBG_PRINT(ERR_DBG, 7902 "MSI-X requested but failed to enable\n"); 7903 sp->config.intr_type = INTA; 7904 } 7905 } 7906 7907 if (config->intr_type == MSI_X) { 7908 for (i = 0; i < config->rx_ring_num ; i++) { 7909 struct ring_info *ring = &mac_control->rings[i]; 7910 7911 netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64); 7912 } 7913 } else { 7914 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64); 7915 } 7916 7917 /* Not needed for Herc */ 7918 if (sp->device_type & XFRAME_I_DEVICE) { 7919 /* 7920 * Fix for all "FFs" MAC address problems observed on 7921 * Alpha platforms 7922 */ 7923 fix_mac_address(sp); 7924 s2io_reset(sp); 7925 } 7926 7927 /* 7928 * MAC address initialization. 7929 * For now only one mac address will be read and used. 7930 */ 7931 bar0 = sp->bar0; 7932 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 7933 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET); 7934 writeq(val64, &bar0->rmac_addr_cmd_mem); 7935 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, 7936 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, 7937 S2IO_BIT_RESET, true); 7938 tmp64 = readq(&bar0->rmac_addr_data0_mem); 7939 mac_down = (u32)tmp64; 7940 mac_up = (u32) (tmp64 >> 32); 7941 7942 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up); 7943 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8); 7944 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16); 7945 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24); 7946 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16); 7947 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24); 7948 7949 /* Set the factory defined MAC address initially */ 7950 dev->addr_len = ETH_ALEN; 7951 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); 7952 7953 /* initialize number of multicast & unicast MAC entries variables */ 7954 if (sp->device_type == XFRAME_I_DEVICE) { 7955 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES; 7956 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES; 7957 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET; 7958 } else if (sp->device_type == XFRAME_II_DEVICE) { 7959 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES; 7960 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES; 7961 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET; 7962 } 7963 7964 /* MTU range: 46 - 9600 */ 7965 dev->min_mtu = MIN_MTU; 7966 dev->max_mtu = S2IO_JUMBO_SIZE; 7967 7968 /* store mac addresses from CAM to s2io_nic structure */ 7969 do_s2io_store_unicast_mc(sp); 7970 7971 /* Configure MSIX vector for number of rings configured plus one */ 7972 if ((sp->device_type == XFRAME_II_DEVICE) && 7973 (config->intr_type == MSI_X)) 7974 sp->num_entries = config->rx_ring_num + 1; 7975 7976 /* Store the values of the MSIX table in the s2io_nic structure */ 7977 store_xmsi_data(sp); 7978 /* reset Nic and bring it to known state */ 7979 s2io_reset(sp); 7980 7981 /* 7982 * Initialize link state flags 7983 * and the card state parameter 7984 */ 7985 sp->state = 0; 7986 7987 /* Initialize spinlocks */ 7988 for (i = 0; i < sp->config.tx_fifo_num; i++) { 7989 struct fifo_info *fifo = &mac_control->fifos[i]; 7990 7991 spin_lock_init(&fifo->tx_lock); 7992 } 7993 7994 /* 7995 * SXE-002: Configure link and activity LED to init state 7996 * on driver load. 7997 */ 7998 subid = sp->pdev->subsystem_device; 7999 if ((subid & 0xFF) >= 0x07) { 8000 val64 = readq(&bar0->gpio_control); 8001 val64 |= 0x0000800000000000ULL; 8002 writeq(val64, &bar0->gpio_control); 8003 val64 = 0x0411040400000000ULL; 8004 writeq(val64, (void __iomem *)bar0 + 0x2700); 8005 val64 = readq(&bar0->gpio_control); 8006 } 8007 8008 sp->rx_csum = 1; /* Rx chksum verify enabled by default */ 8009 8010 if (register_netdev(dev)) { 8011 DBG_PRINT(ERR_DBG, "Device registration failed\n"); 8012 ret = -ENODEV; 8013 goto register_failed; 8014 } 8015 s2io_vpd_read(sp); 8016 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n"); 8017 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name, 8018 sp->product_name, pdev->revision); 8019 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, 8020 s2io_driver_version); 8021 DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr); 8022 DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num); 8023 if (sp->device_type & XFRAME_II_DEVICE) { 8024 mode = s2io_print_pci_mode(sp); 8025 if (mode < 0) { 8026 ret = -EBADSLT; 8027 unregister_netdev(dev); 8028 goto set_swap_failed; 8029 } 8030 } 8031 switch (sp->rxd_mode) { 8032 case RXD_MODE_1: 8033 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n", 8034 dev->name); 8035 break; 8036 case RXD_MODE_3B: 8037 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n", 8038 dev->name); 8039 break; 8040 } 8041 8042 switch (sp->config.napi) { 8043 case 0: 8044 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name); 8045 break; 8046 case 1: 8047 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); 8048 break; 8049 } 8050 8051 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, 8052 sp->config.tx_fifo_num); 8053 8054 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name, 8055 sp->config.rx_ring_num); 8056 8057 switch (sp->config.intr_type) { 8058 case INTA: 8059 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); 8060 break; 8061 case MSI_X: 8062 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name); 8063 break; 8064 } 8065 if (sp->config.multiq) { 8066 for (i = 0; i < sp->config.tx_fifo_num; i++) { 8067 struct fifo_info *fifo = &mac_control->fifos[i]; 8068 8069 fifo->multiq = config->multiq; 8070 } 8071 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n", 8072 dev->name); 8073 } else 8074 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n", 8075 dev->name); 8076 8077 switch (sp->config.tx_steering_type) { 8078 case NO_STEERING: 8079 DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n", 8080 dev->name); 8081 break; 8082 case TX_PRIORITY_STEERING: 8083 DBG_PRINT(ERR_DBG, 8084 "%s: Priority steering enabled for transmit\n", 8085 dev->name); 8086 break; 8087 case TX_DEFAULT_STEERING: 8088 DBG_PRINT(ERR_DBG, 8089 "%s: Default steering enabled for transmit\n", 8090 dev->name); 8091 } 8092 8093 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", 8094 dev->name); 8095 /* Initialize device name */ 8096 snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name, 8097 sp->product_name); 8098 8099 if (vlan_tag_strip) 8100 sp->vlan_strip_flag = 1; 8101 else 8102 sp->vlan_strip_flag = 0; 8103 8104 /* 8105 * Make Link state as off at this point, when the Link change 8106 * interrupt comes the state will be automatically changed to 8107 * the right state. 8108 */ 8109 netif_carrier_off(dev); 8110 8111 return 0; 8112 8113 register_failed: 8114 set_swap_failed: 8115 iounmap(sp->bar1); 8116 bar1_remap_failed: 8117 iounmap(sp->bar0); 8118 bar0_remap_failed: 8119 mem_alloc_failed: 8120 free_shared_mem(sp); 8121 pci_disable_device(pdev); 8122 pci_release_regions(pdev); 8123 free_netdev(dev); 8124 8125 return ret; 8126 } 8127 8128 /** 8129 * s2io_rem_nic - Free the PCI device 8130 * @pdev: structure containing the PCI related information of the device. 8131 * Description: This function is called by the Pci subsystem to release a 8132 * PCI device and free up all resource held up by the device. This could 8133 * be in response to a Hot plug event or when the driver is to be removed 8134 * from memory. 8135 */ 8136 8137 static void s2io_rem_nic(struct pci_dev *pdev) 8138 { 8139 struct net_device *dev = pci_get_drvdata(pdev); 8140 struct s2io_nic *sp; 8141 8142 if (dev == NULL) { 8143 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n"); 8144 return; 8145 } 8146 8147 sp = netdev_priv(dev); 8148 8149 cancel_work_sync(&sp->rst_timer_task); 8150 cancel_work_sync(&sp->set_link_task); 8151 8152 unregister_netdev(dev); 8153 8154 free_shared_mem(sp); 8155 iounmap(sp->bar0); 8156 iounmap(sp->bar1); 8157 pci_release_regions(pdev); 8158 free_netdev(dev); 8159 pci_disable_device(pdev); 8160 } 8161 8162 module_pci_driver(s2io_driver); 8163 8164 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip, 8165 struct tcphdr **tcp, struct RxD_t *rxdp, 8166 struct s2io_nic *sp) 8167 { 8168 int ip_off; 8169 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len; 8170 8171 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) { 8172 DBG_PRINT(INIT_DBG, 8173 "%s: Non-TCP frames not supported for LRO\n", 8174 __func__); 8175 return -1; 8176 } 8177 8178 /* Checking for DIX type or DIX type with VLAN */ 8179 if ((l2_type == 0) || (l2_type == 4)) { 8180 ip_off = HEADER_ETHERNET_II_802_3_SIZE; 8181 /* 8182 * If vlan stripping is disabled and the frame is VLAN tagged, 8183 * shift the offset by the VLAN header size bytes. 8184 */ 8185 if ((!sp->vlan_strip_flag) && 8186 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG)) 8187 ip_off += HEADER_VLAN_SIZE; 8188 } else { 8189 /* LLC, SNAP etc are considered non-mergeable */ 8190 return -1; 8191 } 8192 8193 *ip = (struct iphdr *)(buffer + ip_off); 8194 ip_len = (u8)((*ip)->ihl); 8195 ip_len <<= 2; 8196 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len); 8197 8198 return 0; 8199 } 8200 8201 static int check_for_socket_match(struct lro *lro, struct iphdr *ip, 8202 struct tcphdr *tcp) 8203 { 8204 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); 8205 if ((lro->iph->saddr != ip->saddr) || 8206 (lro->iph->daddr != ip->daddr) || 8207 (lro->tcph->source != tcp->source) || 8208 (lro->tcph->dest != tcp->dest)) 8209 return -1; 8210 return 0; 8211 } 8212 8213 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp) 8214 { 8215 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2); 8216 } 8217 8218 static void initiate_new_session(struct lro *lro, u8 *l2h, 8219 struct iphdr *ip, struct tcphdr *tcp, 8220 u32 tcp_pyld_len, u16 vlan_tag) 8221 { 8222 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); 8223 lro->l2h = l2h; 8224 lro->iph = ip; 8225 lro->tcph = tcp; 8226 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq); 8227 lro->tcp_ack = tcp->ack_seq; 8228 lro->sg_num = 1; 8229 lro->total_len = ntohs(ip->tot_len); 8230 lro->frags_len = 0; 8231 lro->vlan_tag = vlan_tag; 8232 /* 8233 * Check if we saw TCP timestamp. 8234 * Other consistency checks have already been done. 8235 */ 8236 if (tcp->doff == 8) { 8237 __be32 *ptr; 8238 ptr = (__be32 *)(tcp+1); 8239 lro->saw_ts = 1; 8240 lro->cur_tsval = ntohl(*(ptr+1)); 8241 lro->cur_tsecr = *(ptr+2); 8242 } 8243 lro->in_use = 1; 8244 } 8245 8246 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro) 8247 { 8248 struct iphdr *ip = lro->iph; 8249 struct tcphdr *tcp = lro->tcph; 8250 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; 8251 8252 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); 8253 8254 /* Update L3 header */ 8255 csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len)); 8256 ip->tot_len = htons(lro->total_len); 8257 8258 /* Update L4 header */ 8259 tcp->ack_seq = lro->tcp_ack; 8260 tcp->window = lro->window; 8261 8262 /* Update tsecr field if this session has timestamps enabled */ 8263 if (lro->saw_ts) { 8264 __be32 *ptr = (__be32 *)(tcp + 1); 8265 *(ptr+2) = lro->cur_tsecr; 8266 } 8267 8268 /* Update counters required for calculation of 8269 * average no. of packets aggregated. 8270 */ 8271 swstats->sum_avg_pkts_aggregated += lro->sg_num; 8272 swstats->num_aggregations++; 8273 } 8274 8275 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip, 8276 struct tcphdr *tcp, u32 l4_pyld) 8277 { 8278 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); 8279 lro->total_len += l4_pyld; 8280 lro->frags_len += l4_pyld; 8281 lro->tcp_next_seq += l4_pyld; 8282 lro->sg_num++; 8283 8284 /* Update ack seq no. and window ad(from this pkt) in LRO object */ 8285 lro->tcp_ack = tcp->ack_seq; 8286 lro->window = tcp->window; 8287 8288 if (lro->saw_ts) { 8289 __be32 *ptr; 8290 /* Update tsecr and tsval from this packet */ 8291 ptr = (__be32 *)(tcp+1); 8292 lro->cur_tsval = ntohl(*(ptr+1)); 8293 lro->cur_tsecr = *(ptr + 2); 8294 } 8295 } 8296 8297 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip, 8298 struct tcphdr *tcp, u32 tcp_pyld_len) 8299 { 8300 u8 *ptr; 8301 8302 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); 8303 8304 if (!tcp_pyld_len) { 8305 /* Runt frame or a pure ack */ 8306 return -1; 8307 } 8308 8309 if (ip->ihl != 5) /* IP has options */ 8310 return -1; 8311 8312 /* If we see CE codepoint in IP header, packet is not mergeable */ 8313 if (INET_ECN_is_ce(ipv4_get_dsfield(ip))) 8314 return -1; 8315 8316 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */ 8317 if (tcp->urg || tcp->psh || tcp->rst || 8318 tcp->syn || tcp->fin || 8319 tcp->ece || tcp->cwr || !tcp->ack) { 8320 /* 8321 * Currently recognize only the ack control word and 8322 * any other control field being set would result in 8323 * flushing the LRO session 8324 */ 8325 return -1; 8326 } 8327 8328 /* 8329 * Allow only one TCP timestamp option. Don't aggregate if 8330 * any other options are detected. 8331 */ 8332 if (tcp->doff != 5 && tcp->doff != 8) 8333 return -1; 8334 8335 if (tcp->doff == 8) { 8336 ptr = (u8 *)(tcp + 1); 8337 while (*ptr == TCPOPT_NOP) 8338 ptr++; 8339 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP) 8340 return -1; 8341 8342 /* Ensure timestamp value increases monotonically */ 8343 if (l_lro) 8344 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2)))) 8345 return -1; 8346 8347 /* timestamp echo reply should be non-zero */ 8348 if (*((__be32 *)(ptr+6)) == 0) 8349 return -1; 8350 } 8351 8352 return 0; 8353 } 8354 8355 static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, 8356 u8 **tcp, u32 *tcp_len, struct lro **lro, 8357 struct RxD_t *rxdp, struct s2io_nic *sp) 8358 { 8359 struct iphdr *ip; 8360 struct tcphdr *tcph; 8361 int ret = 0, i; 8362 u16 vlan_tag = 0; 8363 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; 8364 8365 ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp, 8366 rxdp, sp); 8367 if (ret) 8368 return ret; 8369 8370 DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr); 8371 8372 vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2); 8373 tcph = (struct tcphdr *)*tcp; 8374 *tcp_len = get_l4_pyld_length(ip, tcph); 8375 for (i = 0; i < MAX_LRO_SESSIONS; i++) { 8376 struct lro *l_lro = &ring_data->lro0_n[i]; 8377 if (l_lro->in_use) { 8378 if (check_for_socket_match(l_lro, ip, tcph)) 8379 continue; 8380 /* Sock pair matched */ 8381 *lro = l_lro; 8382 8383 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) { 8384 DBG_PRINT(INFO_DBG, "%s: Out of sequence. " 8385 "expected 0x%x, actual 0x%x\n", 8386 __func__, 8387 (*lro)->tcp_next_seq, 8388 ntohl(tcph->seq)); 8389 8390 swstats->outof_sequence_pkts++; 8391 ret = 2; 8392 break; 8393 } 8394 8395 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph, 8396 *tcp_len)) 8397 ret = 1; /* Aggregate */ 8398 else 8399 ret = 2; /* Flush both */ 8400 break; 8401 } 8402 } 8403 8404 if (ret == 0) { 8405 /* Before searching for available LRO objects, 8406 * check if the pkt is L3/L4 aggregatable. If not 8407 * don't create new LRO session. Just send this 8408 * packet up. 8409 */ 8410 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) 8411 return 5; 8412 8413 for (i = 0; i < MAX_LRO_SESSIONS; i++) { 8414 struct lro *l_lro = &ring_data->lro0_n[i]; 8415 if (!(l_lro->in_use)) { 8416 *lro = l_lro; 8417 ret = 3; /* Begin anew */ 8418 break; 8419 } 8420 } 8421 } 8422 8423 if (ret == 0) { /* sessions exceeded */ 8424 DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n", 8425 __func__); 8426 *lro = NULL; 8427 return ret; 8428 } 8429 8430 switch (ret) { 8431 case 3: 8432 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len, 8433 vlan_tag); 8434 break; 8435 case 2: 8436 update_L3L4_header(sp, *lro); 8437 break; 8438 case 1: 8439 aggregate_new_rx(*lro, ip, tcph, *tcp_len); 8440 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) { 8441 update_L3L4_header(sp, *lro); 8442 ret = 4; /* Flush the LRO */ 8443 } 8444 break; 8445 default: 8446 DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__); 8447 break; 8448 } 8449 8450 return ret; 8451 } 8452 8453 static void clear_lro_session(struct lro *lro) 8454 { 8455 static u16 lro_struct_size = sizeof(struct lro); 8456 8457 memset(lro, 0, lro_struct_size); 8458 } 8459 8460 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag) 8461 { 8462 struct net_device *dev = skb->dev; 8463 struct s2io_nic *sp = netdev_priv(dev); 8464 8465 skb->protocol = eth_type_trans(skb, dev); 8466 if (vlan_tag && sp->vlan_strip_flag) 8467 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); 8468 if (sp->config.napi) 8469 netif_receive_skb(skb); 8470 else 8471 netif_rx(skb); 8472 } 8473 8474 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro, 8475 struct sk_buff *skb, u32 tcp_len) 8476 { 8477 struct sk_buff *first = lro->parent; 8478 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; 8479 8480 first->len += tcp_len; 8481 first->data_len = lro->frags_len; 8482 skb_pull(skb, (skb->len - tcp_len)); 8483 if (skb_shinfo(first)->frag_list) 8484 lro->last_frag->next = skb; 8485 else 8486 skb_shinfo(first)->frag_list = skb; 8487 first->truesize += skb->truesize; 8488 lro->last_frag = skb; 8489 swstats->clubbed_frms_cnt++; 8490 } 8491 8492 /** 8493 * s2io_io_error_detected - called when PCI error is detected 8494 * @pdev: Pointer to PCI device 8495 * @state: The current pci connection state 8496 * 8497 * This function is called after a PCI bus error affecting 8498 * this device has been detected. 8499 */ 8500 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev, 8501 pci_channel_state_t state) 8502 { 8503 struct net_device *netdev = pci_get_drvdata(pdev); 8504 struct s2io_nic *sp = netdev_priv(netdev); 8505 8506 netif_device_detach(netdev); 8507 8508 if (state == pci_channel_io_perm_failure) 8509 return PCI_ERS_RESULT_DISCONNECT; 8510 8511 if (netif_running(netdev)) { 8512 /* Bring down the card, while avoiding PCI I/O */ 8513 do_s2io_card_down(sp, 0); 8514 } 8515 pci_disable_device(pdev); 8516 8517 return PCI_ERS_RESULT_NEED_RESET; 8518 } 8519 8520 /** 8521 * s2io_io_slot_reset - called after the pci bus has been reset. 8522 * @pdev: Pointer to PCI device 8523 * 8524 * Restart the card from scratch, as if from a cold-boot. 8525 * At this point, the card has exprienced a hard reset, 8526 * followed by fixups by BIOS, and has its config space 8527 * set up identically to what it was at cold boot. 8528 */ 8529 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev) 8530 { 8531 struct net_device *netdev = pci_get_drvdata(pdev); 8532 struct s2io_nic *sp = netdev_priv(netdev); 8533 8534 if (pci_enable_device(pdev)) { 8535 pr_err("Cannot re-enable PCI device after reset.\n"); 8536 return PCI_ERS_RESULT_DISCONNECT; 8537 } 8538 8539 pci_set_master(pdev); 8540 s2io_reset(sp); 8541 8542 return PCI_ERS_RESULT_RECOVERED; 8543 } 8544 8545 /** 8546 * s2io_io_resume - called when traffic can start flowing again. 8547 * @pdev: Pointer to PCI device 8548 * 8549 * This callback is called when the error recovery driver tells 8550 * us that its OK to resume normal operation. 8551 */ 8552 static void s2io_io_resume(struct pci_dev *pdev) 8553 { 8554 struct net_device *netdev = pci_get_drvdata(pdev); 8555 struct s2io_nic *sp = netdev_priv(netdev); 8556 8557 if (netif_running(netdev)) { 8558 if (s2io_card_up(sp)) { 8559 pr_err("Can't bring device back up after reset.\n"); 8560 return; 8561 } 8562 8563 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) { 8564 s2io_card_down(sp); 8565 pr_err("Can't restore mac addr after reset.\n"); 8566 return; 8567 } 8568 } 8569 8570 netif_device_attach(netdev); 8571 netif_tx_wake_all_queues(netdev); 8572 } 8573