/* SPDX-License-Identifier: GPL-2.0-or-later */
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/**************************************************************************/

#define IBMVNIC_NAME		"ibmvnic"
#define IBMVNIC_DRIVER_VERSION	"1.0.1"
#define IBMVNIC_INVALID_MAP	-1
#define IBMVNIC_OPEN_FAILED	3

/* basic structures plus 100 2k buffers */
#define IBMVNIC_IO_ENTITLEMENT_DEFAULT	610305

/* Initial module_parameters */
#define IBMVNIC_RX_WEIGHT	16
/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
#define IBMVNIC_BUFFS_PER_POOL	100
#define IBMVNIC_MAX_QUEUES	16
#define IBMVNIC_MAX_QUEUE_SZ	4096
#define IBMVNIC_MAX_IND_DESCS	16
#define IBMVNIC_IND_ARR_SZ	(IBMVNIC_MAX_IND_DESCS * 32)

#define IBMVNIC_TSO_BUF_SZ	65536
#define IBMVNIC_TSO_BUFS	64
#define IBMVNIC_TSO_POOL_MASK	0x80000000

#define IBMVNIC_MAX_LTB_SIZE	((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
#define IBMVNIC_BUFFER_HLEN	500

#define IBMVNIC_RESET_DELAY	100

struct ibmvnic_login_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_LB 1
	__be32 num_txcomp_subcrqs;
	__be32 off_txcomp_subcrqs;
	__be32 num_rxcomp_subcrqs;
	__be32 off_rxcomp_subcrqs;
	__be32 login_rsp_ioba;
	__be32 login_rsp_len;
	__be32 client_data_offset;
	__be32 client_data_len;
} __packed __aligned(8);

struct ibmvnic_login_rsp_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_LRB 1
	__be32 num_txsubm_subcrqs;
	__be32 off_txsubm_subcrqs;
	__be32 num_rxadd_subcrqs;
	__be32 off_rxadd_subcrqs;
	__be32 off_rxadd_buff_size;
	__be32 num_supp_tx_desc;
	__be32 off_supp_tx_desc;
} __packed __aligned(8);

struct ibmvnic_query_ip_offload_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 ipv4_chksum;
	u8 ipv6_chksum;
	u8 tcp_ipv4_chksum;
	u8 tcp_ipv6_chksum;
	u8 udp_ipv4_chksum;
	u8 udp_ipv6_chksum;
	u8 large_tx_ipv4;
	u8 large_tx_ipv6;
	u8 large_rx_ipv4;
	u8 large_rx_ipv6;
	u8 reserved1[14];
	__be16 max_ipv4_header_size;
	__be16 max_ipv6_header_size;
	__be16 max_tcp_header_size;
	__be16 max_udp_header_size;
	__be32 max_large_tx_size;
	__be32 max_large_rx_size;
	u8 reserved2[16];
	u8 ipv6_extension_header;
#define IPV6_EH_NOT_SUPPORTED	0x00
#define IPV6_EH_SUPPORTED_LIM	0x01
#define IPV6_EH_SUPPORTED	0xFF
	u8 tcp_pseudosum_req;
#define TCP_PS_NOT_REQUIRED	0x00
#define TCP_PS_REQUIRED		0x01
	u8 reserved3[30];
	__be16 num_ipv6_ext_headers;
	__be32 off_ipv6_ext_headers;
	u8 reserved4[154];
} __packed __aligned(8);

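/* Illustrative sketch only (not part of the firmware interface): one way
 * the checksum/TSO capability flags in the query buffer above could be
 * folded into netdev feature bits. The helper name is hypothetical, and
 * linux/netdevice.h is assumed to be included by the translation unit
 * that uses this header.
 */
static inline netdev_features_t
ibmvnic_example_offload_to_features(const struct ibmvnic_query_ip_offload_buffer *buf)
{
	netdev_features_t features = 0;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		features |= NETIF_F_IP_CSUM;
	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		features |= NETIF_F_IPV6_CSUM;
	if (buf->large_tx_ipv4)
		features |= NETIF_F_TSO;
	if (buf->large_tx_ipv6)
		features |= NETIF_F_TSO6;

	return features;
}
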
struct ibmvnic_control_ip_offload_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 ipv4_chksum;
	u8 ipv6_chksum;
	u8 tcp_ipv4_chksum;
	u8 tcp_ipv6_chksum;
	u8 udp_ipv4_chksum;
	u8 udp_ipv6_chksum;
	u8 large_tx_ipv4;
	u8 large_tx_ipv6;
	u8 bad_packet_rx;
	u8 large_rx_ipv4;
	u8 large_rx_ipv6;
	u8 reserved4[111];
} __packed __aligned(8);

struct ibmvnic_fw_component {
	u8 name[48];
	__be32 trace_buff_size;
	u8 correlator;
	u8 trace_level;
	u8 parent_correlator;
	u8 error_check_level;
	u8 trace_on;
	u8 reserved[7];
	u8 description[192];
} __packed __aligned(8);

struct ibmvnic_fw_trace_entry {
	__be32 trace_id;
	u8 num_valid_data;
	u8 reserved[3];
	__be64 pmc_registers;
	__be64 timebase;
	__be64 trace_data[5];
} __packed __aligned(8);

struct ibmvnic_statistics {
	__be32 version;
	__be32 promiscuous;
	__be64 rx_packets;
	__be64 rx_bytes;
	__be64 tx_packets;
	__be64 tx_bytes;
	__be64 ucast_tx_packets;
	__be64 ucast_rx_packets;
	__be64 mcast_tx_packets;
	__be64 mcast_rx_packets;
	__be64 bcast_tx_packets;
	__be64 bcast_rx_packets;
	__be64 align_errors;
	__be64 fcs_errors;
	__be64 single_collision_frames;
	__be64 multi_collision_frames;
	__be64 sqe_test_errors;
	__be64 deferred_tx;
	__be64 late_collisions;
	__be64 excess_collisions;
	__be64 internal_mac_tx_errors;
	__be64 carrier_sense;
	__be64 too_long_frames;
	__be64 internal_mac_rx_errors;
	u8 reserved[72];
} __packed __aligned(8);

#define NUM_TX_STATS 3
struct ibmvnic_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 dropped_packets;
};

#define NUM_RX_STATS 3
struct ibmvnic_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 interrupts;
};

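/* Illustrative sketch only: the ibmvnic_statistics buffer above is filled
 * by firmware in big-endian form (__be64), while the per-queue counters
 * are plain CPU-endian u64s maintained by the driver. Anything copying a
 * firmware counter out (for example into an ethtool dump) would convert
 * it first. The helper name below is hypothetical.
 */
static inline u64 ibmvnic_example_read_fw_counter(const __be64 *counter)
{
	/* firmware counters are __be64; convert before reporting them */
	return be64_to_cpu(*counter);
}
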
struct ibmvnic_acl_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 mac_acls_restrict;
	u8 vlan_acls_restrict;
	u8 reserved1[22];
	__be32 num_mac_addrs;
	__be32 offset_mac_addrs;
	__be32 num_vlan_ids;
	__be32 offset_vlan_ids;
	u8 reserved2[80];
} __packed __aligned(8);

/* descriptors have been changed, how should this be defined? 1? 4? */

#define IBMVNIC_TX_DESC_VERSIONS 3

/* is this still needed? */
struct ibmvnic_tx_comp_desc {
	u8 first;
	u8 num_comps;
	__be16 rcs[5];
	__be32 correlators[5];
} __packed __aligned(8);

/* Flags that were part of the v0 descriptor, which is gone.
 * They are only used as IBMVNIC_TCP_CHKSUM and IBMVNIC_UDP_CHKSUM,
 * and only in an offload_flags variable that does not appear to be
 * used anywhere, so they can probably be removed.
 */

#define IBMVNIC_TCP_CHKSUM		0x20
#define IBMVNIC_UDP_CHKSUM		0x08

struct ibmvnic_tx_desc {
	u8 first;
	u8 type;

#define IBMVNIC_TX_DESC 0x10
	u8 n_crq_elem;
	u8 n_sge;
	u8 flags1;
#define IBMVNIC_TX_COMP_NEEDED		0x80
#define IBMVNIC_TX_CHKSUM_OFFLOAD	0x40
#define IBMVNIC_TX_LSO			0x20
#define IBMVNIC_TX_PROT_TCP		0x10
#define IBMVNIC_TX_PROT_UDP		0x08
#define IBMVNIC_TX_PROT_IPV4		0x04
#define IBMVNIC_TX_PROT_IPV6		0x02
#define IBMVNIC_TX_VLAN_PRESENT		0x01
	u8 flags2;
#define IBMVNIC_TX_VLAN_INSERT		0x80
	__be16 mss;
	u8 reserved[4];
	__be32 correlator;
	__be16 vlan_id;
	__be16 dma_reg;
	__be32 sge_len;
	__be64 ioba;
} __packed __aligned(8);

struct ibmvnic_hdr_desc {
	u8 first;
	u8 type;
#define IBMVNIC_HDR_DESC	0x11
	u8 len;
	u8 l2_len;
	__be16 l3_len;
	u8 l4_len;
	u8 flag;
	u8 data[24];
} __packed __aligned(8);

struct ibmvnic_hdr_ext_desc {
	u8 first;
	u8 type;
#define IBMVNIC_HDR_EXT_DESC	0x12
	u8 len;
	u8 data[29];
} __packed __aligned(8);

struct ibmvnic_sge_desc {
	u8 first;
	u8 type;
#define IBMVNIC_SGE_DESC	0x30
	__be16 sge1_dma_reg;
	__be32 sge1_len;
	__be64 sge1_ioba;
	__be16 reserved;
	__be16 sge2_dma_reg;
	__be32 sge2_len;
	__be64 sge2_ioba;
} __packed __aligned(8);

struct ibmvnic_rx_comp_desc {
	u8 first;
	u8 flags;
#define IBMVNIC_IP_CHKSUM_GOOD		0x80
#define IBMVNIC_TCP_UDP_CHKSUM_GOOD	0x40
#define IBMVNIC_END_FRAME		0x20
#define IBMVNIC_EXACT_MC		0x10
#define IBMVNIC_VLAN_STRIPPED		0x08
	__be16 off_frame_data;
	__be32 len;
	__be64 correlator;
	__be16 vlan_tci;
	__be16 rc;
	u8 reserved[12];
} __packed __aligned(8);

struct ibmvnic_generic_scrq {
	u8 first;
	u8 reserved[31];
} __packed __aligned(8);

struct ibmvnic_rx_buff_add_desc {
	u8 first;
	u8 reserved[7];
	__be64 correlator;
	__be32 ioba;
	u8 map_id;
	__be32 len:24;
	u8 reserved2[8];
} __packed __aligned(8);

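/* Illustrative sketch only: how a receive path could honour the checksum
 * flags carried in ibmvnic_rx_comp_desc above, setting CHECKSUM_UNNECESSARY
 * only when both the IP and TCP/UDP checksums were verified by the adapter.
 * The helper name is hypothetical, and linux/skbuff.h is assumed to be
 * included by the user of this header.
 */
static inline void ibmvnic_example_rx_csum(const struct ibmvnic_rx_comp_desc *rx,
					   struct sk_buff *skb)
{
	if ((rx->flags & IBMVNIC_IP_CHKSUM_GOOD) &&
	    (rx->flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
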
struct ibmvnic_rc {
	u8 code; /* one of enum ibmvnic_rc_codes */
	u8 detailed_data[3];
} __packed __aligned(4);

struct ibmvnic_generic_crq {
	u8 first;
	u8 cmd;
	u8 params[10];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_version_exchange {
	u8 first;
	u8 cmd;
	__be16 version;
#define IBMVNIC_INITIAL_VERSION 1
	u8 reserved[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_capability {
	u8 first;
	u8 cmd;
	__be16 capability; /* one of ibmvnic_capabilities */
	__be64 number;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_login {
	u8 first;
	u8 cmd;
	u8 reserved[6];
	__be32 ioba;
	__be32 len;
} __packed __aligned(8);

struct ibmvnic_phys_parms {
	u8 first;
	u8 cmd;
	u8 flags1;
#define IBMVNIC_EXTERNAL_LOOPBACK	0x80
#define IBMVNIC_INTERNAL_LOOPBACK	0x40
#define IBMVNIC_PROMISC			0x20
#define IBMVNIC_PHYS_LINK_ACTIVE	0x10
#define IBMVNIC_AUTONEG_DUPLEX		0x08
#define IBMVNIC_FULL_DUPLEX		0x04
#define IBMVNIC_HALF_DUPLEX		0x02
#define IBMVNIC_CAN_CHG_PHYS_PARMS	0x01
	u8 flags2;
#define IBMVNIC_LOGICAL_LNK_ACTIVE	0x80
	__be32 speed;
#define IBMVNIC_AUTONEG		0x80000000
#define IBMVNIC_10MBPS		0x40000000
#define IBMVNIC_100MBPS		0x20000000
#define IBMVNIC_1GBPS		0x10000000
#define IBMVNIC_10GBPS		0x08000000
#define IBMVNIC_40GBPS		0x04000000
#define IBMVNIC_100GBPS		0x02000000
#define IBMVNIC_25GBPS		0x01000000
#define IBMVNIC_50GBPS		0x00800000
#define IBMVNIC_200GBPS		0x00400000
	__be32 mtu;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_logical_link_state {
	u8 first;
	u8 cmd;
	u8 link_state;
#define IBMVNIC_LOGICAL_LNK_DN		0x00
#define IBMVNIC_LOGICAL_LNK_UP		0x01
#define IBMVNIC_LOGICAL_LNK_QUERY	0xff
	u8 reserved[9];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_query_ip_offload {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be32 len;
	__be32 ioba;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_control_ip_offload {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be32 ioba;
	__be32 len;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_request_statistics {
	u8 first;
	u8 cmd;
	u8 flags;
#define IBMVNIC_PHYSICAL_PORT	0x80
	u8 reserved1;
	__be32 ioba;
	__be32 len;
	u8 reserved[4];
} __packed __aligned(8);

struct ibmvnic_error_indication {
	u8 first;
	u8 cmd;
	u8 flags;
#define IBMVNIC_FATAL_ERROR	0x80
	u8 reserved1;
	__be32 error_id;
	__be32 detail_error_sz;
	__be16 error_cause;
	u8 reserved2[2];
} __packed __aligned(8);

struct ibmvnic_link_state_indication {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	u8 phys_link_state;
	u8 logical_link_state;
	u8 reserved2[10];
} __packed __aligned(8);

struct ibmvnic_change_mac_addr {
	u8 first;
	u8 cmd;
	u8 mac_addr[6];
	u8 reserved[4];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_multicast_ctrl {
	u8 first;
	u8 cmd;
	u8 mac_addr[6];
	u8 flags;
#define IBMVNIC_ENABLE_MC	0x80
#define IBMVNIC_DISABLE_MC	0x40
#define IBMVNIC_ENABLE_ALL	0x20
#define IBMVNIC_DISABLE_ALL	0x10
	u8 reserved1;
	__be16 reserved2; /* was num_enabled_mc_addr; */
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_get_vpd_size {
	u8 first;
	u8 cmd;
	u8 reserved[14];
} __packed __aligned(8);

struct ibmvnic_get_vpd_size_rsp {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be64 len;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_get_vpd {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved[4];
} __packed __aligned(8);

struct ibmvnic_get_vpd_rsp {
	u8 first;
	u8 cmd;
	u8 reserved[10];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_acl_change_indication {
	u8 first;
	u8 cmd;
	__be16 change_type;
#define IBMVNIC_MAC_ACL		0
#define IBMVNIC_VLAN_ACL	1
	u8 reserved[12];
} __packed __aligned(8);

struct ibmvnic_acl_query {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

struct ibmvnic_tune {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

struct ibmvnic_request_map {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

struct ibmvnic_request_map_rsp {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_request_unmap {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[12];
} __packed __aligned(8);

struct ibmvnic_request_unmap_rsp {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

struct ibmvnic_query_map {
	u8 first;
	u8 cmd;
	u8 reserved[14];
} __packed __aligned(8);

struct ibmvnic_query_map_rsp {
	u8 first;
	u8 cmd;
	u8 reserved;
	u8 page_size;
	__be32 tot_pages;
	__be32 free_pages;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

union ibmvnic_crq {
	struct ibmvnic_generic_crq generic;
	struct ibmvnic_version_exchange version_exchange;
	struct ibmvnic_version_exchange version_exchange_rsp;
	struct ibmvnic_capability query_capability;
	struct ibmvnic_capability query_capability_rsp;
	struct ibmvnic_capability request_capability;
	struct ibmvnic_capability request_capability_rsp;
	struct ibmvnic_login login;
	struct ibmvnic_generic_crq login_rsp;
	struct ibmvnic_phys_parms query_phys_parms;
	struct ibmvnic_phys_parms query_phys_parms_rsp;
	struct ibmvnic_phys_parms query_phys_capabilities;
	struct ibmvnic_phys_parms query_phys_capabilities_rsp;
	struct ibmvnic_phys_parms set_phys_parms;
	struct ibmvnic_phys_parms set_phys_parms_rsp;
	struct ibmvnic_logical_link_state logical_link_state;
	struct ibmvnic_logical_link_state logical_link_state_rsp;
	struct ibmvnic_query_ip_offload query_ip_offload;
	struct ibmvnic_query_ip_offload query_ip_offload_rsp;
	struct ibmvnic_control_ip_offload control_ip_offload;
	struct ibmvnic_control_ip_offload control_ip_offload_rsp;
	struct ibmvnic_request_statistics request_statistics;
	struct ibmvnic_generic_crq request_statistics_rsp;
	struct ibmvnic_error_indication error_indication;
	struct ibmvnic_link_state_indication link_state_indication;
	struct ibmvnic_change_mac_addr change_mac_addr;
	struct ibmvnic_change_mac_addr change_mac_addr_rsp;
	struct ibmvnic_multicast_ctrl multicast_ctrl;
	struct ibmvnic_multicast_ctrl multicast_ctrl_rsp;
	struct ibmvnic_get_vpd_size get_vpd_size;
	struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp;
	struct ibmvnic_get_vpd get_vpd;
	struct ibmvnic_get_vpd_rsp get_vpd_rsp;
	struct ibmvnic_acl_change_indication acl_change_indication;
	struct ibmvnic_acl_query acl_query;
	struct ibmvnic_generic_crq acl_query_rsp;
	struct ibmvnic_tune tune;
	struct ibmvnic_generic_crq tune_rsp;
	struct ibmvnic_request_map request_map;
	struct ibmvnic_request_map_rsp request_map_rsp;
	struct ibmvnic_request_unmap request_unmap;
	struct ibmvnic_request_unmap_rsp request_unmap_rsp;
	struct ibmvnic_query_map query_map;
	struct ibmvnic_query_map_rsp query_map_rsp;
};

enum ibmvnic_rc_codes {
	SUCCESS = 0,
	PARTIALSUCCESS = 1,
	PERMISSION = 2,
	NOMEMORY = 3,
	PARAMETER = 4,
	UNKNOWNCOMMAND = 5,
	ABORTED = 6,
	INVALIDSTATE = 7,
	INVALIDIOBA = 8,
	INVALIDLENGTH = 9,
	UNSUPPORTEDOPTION = 10,
};

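/* Illustrative sketch only: one plausible way to fold an ibmvnic_rc code
 * from a CRQ response into a kernel errno. The mapping below is an
 * assumption chosen for the example, not something defined by the firmware
 * interface; the helper name is hypothetical.
 */
static inline int ibmvnic_example_rc_to_errno(const struct ibmvnic_rc *rc)
{
	switch (rc->code) {
	case SUCCESS:
	case PARTIALSUCCESS:
		return 0;
	case PERMISSION:
		return -EPERM;
	case NOMEMORY:
		return -ENOMEM;
	case PARAMETER:
	case INVALIDIOBA:
	case INVALIDLENGTH:
		return -EINVAL;
	case UNKNOWNCOMMAND:
	case UNSUPPORTEDOPTION:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}
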
enum ibmvnic_capabilities {
	MIN_TX_QUEUES = 1,
	MIN_RX_QUEUES = 2,
	MIN_RX_ADD_QUEUES = 3,
	MAX_TX_QUEUES = 4,
	MAX_RX_QUEUES = 5,
	MAX_RX_ADD_QUEUES = 6,
	REQ_TX_QUEUES = 7,
	REQ_RX_QUEUES = 8,
	REQ_RX_ADD_QUEUES = 9,
	MIN_TX_ENTRIES_PER_SUBCRQ = 10,
	MIN_RX_ADD_ENTRIES_PER_SUBCRQ = 11,
	MAX_TX_ENTRIES_PER_SUBCRQ = 12,
	MAX_RX_ADD_ENTRIES_PER_SUBCRQ = 13,
	REQ_TX_ENTRIES_PER_SUBCRQ = 14,
	REQ_RX_ADD_ENTRIES_PER_SUBCRQ = 15,
	TCP_IP_OFFLOAD = 16,
	PROMISC_REQUESTED = 17,
	PROMISC_SUPPORTED = 18,
	MIN_MTU = 19,
	MAX_MTU = 20,
	REQ_MTU = 21,
	MAX_MULTICAST_FILTERS = 22,
	VLAN_HEADER_INSERTION = 23,
	RX_VLAN_HEADER_INSERTION = 24,
	MAX_TX_SG_ENTRIES = 25,
	RX_SG_SUPPORTED = 26,
	RX_SG_REQUESTED = 27,
	OPT_TX_COMP_SUB_QUEUES = 28,
	OPT_RX_COMP_QUEUES = 29,
	OPT_RX_BUFADD_Q_PER_RX_COMP_Q = 30,
	OPT_TX_ENTRIES_PER_SUBCRQ = 31,
	OPT_RXBA_ENTRIES_PER_SUBCRQ = 32,
	TX_RX_DESC_REQ = 33,
};

enum ibmvnic_error_cause {
	ADAPTER_PROBLEM = 0,
	BUS_PROBLEM = 1,
	FW_PROBLEM = 2,
	DD_PROBLEM = 3,
	EEH_RECOVERY = 4,
	FW_UPDATED = 5,
	LOW_MEMORY = 6,
};

enum ibmvnic_commands {
	VERSION_EXCHANGE = 0x01,
	VERSION_EXCHANGE_RSP = 0x81,
	QUERY_CAPABILITY = 0x02,
	QUERY_CAPABILITY_RSP = 0x82,
	REQUEST_CAPABILITY = 0x03,
	REQUEST_CAPABILITY_RSP = 0x83,
	LOGIN = 0x04,
	LOGIN_RSP = 0x84,
	QUERY_PHYS_PARMS = 0x05,
	QUERY_PHYS_PARMS_RSP = 0x85,
	QUERY_PHYS_CAPABILITIES = 0x06,
	QUERY_PHYS_CAPABILITIES_RSP = 0x86,
	SET_PHYS_PARMS = 0x07,
	SET_PHYS_PARMS_RSP = 0x87,
	ERROR_INDICATION = 0x08,
	LOGICAL_LINK_STATE = 0x0C,
	LOGICAL_LINK_STATE_RSP = 0x8C,
	REQUEST_STATISTICS = 0x0D,
	REQUEST_STATISTICS_RSP = 0x8D,
	COLLECT_FW_TRACE = 0x11,
	COLLECT_FW_TRACE_RSP = 0x91,
	LINK_STATE_INDICATION = 0x12,
	CHANGE_MAC_ADDR = 0x13,
	CHANGE_MAC_ADDR_RSP = 0x93,
	MULTICAST_CTRL = 0x14,
	MULTICAST_CTRL_RSP = 0x94,
	GET_VPD_SIZE = 0x15,
	GET_VPD_SIZE_RSP = 0x95,
	GET_VPD = 0x16,
	GET_VPD_RSP = 0x96,
	TUNE = 0x17,
	TUNE_RSP = 0x97,
	QUERY_IP_OFFLOAD = 0x18,
	QUERY_IP_OFFLOAD_RSP = 0x98,
	CONTROL_IP_OFFLOAD = 0x19,
	CONTROL_IP_OFFLOAD_RSP = 0x99,
	ACL_CHANGE_INDICATION = 0x1A,
	ACL_QUERY = 0x1B,
	ACL_QUERY_RSP = 0x9B,
	QUERY_MAP = 0x1D,
	QUERY_MAP_RSP = 0x9D,
	REQUEST_MAP = 0x1E,
	REQUEST_MAP_RSP = 0x9E,
	REQUEST_UNMAP = 0x1F,
	REQUEST_UNMAP_RSP = 0x9F,
	VLAN_CTRL = 0x20,
	VLAN_CTRL_RSP = 0xA0,
};

enum ibmvnic_crq_type {
	IBMVNIC_CRQ_CMD		= 0x80,
	IBMVNIC_CRQ_CMD_RSP	= 0x80,
	IBMVNIC_CRQ_INIT_CMD	= 0xC0,
	IBMVNIC_CRQ_INIT_RSP	= 0xC0,
	IBMVNIC_CRQ_XPORT_EVENT	= 0xFF,
};

enum ibmvfc_crq_format {
	IBMVNIC_CRQ_INIT		= 0x01,
	IBMVNIC_CRQ_INIT_COMPLETE	= 0x02,
	IBMVNIC_PARTITION_MIGRATED	= 0x06,
	IBMVNIC_DEVICE_FAILOVER		= 0x08,
};

struct ibmvnic_crq_queue {
	union ibmvnic_crq *msgs;
	int size, cur;
	dma_addr_t msg_token;
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	bool active;
	char name[32];
};

union sub_crq {
	struct ibmvnic_generic_scrq generic;
	struct ibmvnic_tx_comp_desc tx_comp;
	struct ibmvnic_tx_desc v1;
	struct ibmvnic_hdr_desc hdr;
	struct ibmvnic_hdr_ext_desc hdr_ext;
	struct ibmvnic_sge_desc sge;
	struct ibmvnic_rx_comp_desc rx_comp;
	struct ibmvnic_rx_buff_add_desc rx_add;
};

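/* Illustrative sketch only: shows how the v1 TX descriptor defined above
 * could be populated inside a union sub_crq before being handed to the
 * hypervisor. Field usage follows the structure layout in this header;
 * the helper name and its parameters are hypothetical, and the caller is
 * assumed to have already DMA-mapped the frame data at 'ioba'.
 */
static inline void ibmvnic_example_fill_tx_desc(union sub_crq *tx_crq,
						u32 correlator, u16 dma_reg,
						u32 len, u64 ioba)
{
	memset(tx_crq, 0, sizeof(*tx_crq));
	tx_crq->v1.first = IBMVNIC_CRQ_CMD;
	tx_crq->v1.type = IBMVNIC_TX_DESC;
	tx_crq->v1.n_crq_elem = 1;
	tx_crq->v1.n_sge = 0;
	tx_crq->v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq->v1.correlator = cpu_to_be32(correlator);
	tx_crq->v1.dma_reg = cpu_to_be16(dma_reg);
	tx_crq->v1.sge_len = cpu_to_be32(len);
	tx_crq->v1.ioba = cpu_to_be64(ioba);
}
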
struct ibmvnic_ind_xmit_queue {
	union sub_crq *indir_arr;
	dma_addr_t indir_dma;
	int index;
};

struct ibmvnic_sub_crq_queue {
	union sub_crq *msgs;
	int size, cur;
	dma_addr_t msg_token;
	unsigned long crq_num;
	unsigned long hw_irq;
	unsigned int irq;
	unsigned int pool_index;
	int scrq_num;
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	struct sk_buff *rx_skb_top;
	struct ibmvnic_adapter *adapter;
	struct ibmvnic_ind_xmit_queue ind_buf;
	atomic_t used;
	char name[32];
	u64 handle;
} ____cacheline_aligned;

struct ibmvnic_long_term_buff {
	unsigned char *buff;
	dma_addr_t addr;
	u64 size;
	u8 map_id;
};

struct ibmvnic_tx_buff {
	struct sk_buff *skb;
	int index;
	int pool_index;
	int num_entries;
};

struct ibmvnic_tx_pool {
	struct ibmvnic_tx_buff *tx_buff;
	int *free_map;
	int consumer_index;
	int producer_index;
	struct ibmvnic_long_term_buff long_term_buff;
	int num_buffers;
	int buf_size;
} ____cacheline_aligned;

struct ibmvnic_rx_buff {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned char *data;
	int size;
	int pool_index;
};

struct ibmvnic_rx_pool {
	struct ibmvnic_rx_buff *rx_buff;
	int size;	/* # of buffers in the pool */
	int index;
	int buff_size;
	atomic_t available;
	int *free_map;
	int next_free;
	int next_alloc;
	int active;
	struct ibmvnic_long_term_buff long_term_buff;
} ____cacheline_aligned;

struct ibmvnic_vpd {
	unsigned char *buff;
	dma_addr_t dma_addr;
	u64 len;
};

enum vnic_state {VNIC_PROBING = 1,
		 VNIC_PROBED,
		 VNIC_OPENING,
		 VNIC_OPEN,
		 VNIC_CLOSING,
		 VNIC_CLOSED,
		 VNIC_REMOVING,
		 VNIC_REMOVED,
		 VNIC_DOWN};

enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
			   VNIC_RESET_MOBILITY,
			   VNIC_RESET_FATAL,
			   VNIC_RESET_NON_FATAL,
			   VNIC_RESET_TIMEOUT,
			   VNIC_RESET_CHANGE_PARAM,
			   VNIC_RESET_PASSIVE_INIT};

struct ibmvnic_rwi {
	enum ibmvnic_reset_reason reset_reason;
	struct list_head list;
};

struct ibmvnic_tunables {
	u64 rx_queues;
	u64 tx_queues;
	u64 rx_entries;
	u64 tx_entries;
	u64 mtu;
};

struct ibmvnic_adapter {
	struct vio_dev *vdev;
	struct net_device *netdev;
	struct ibmvnic_crq_queue crq;
	u8 mac_addr[ETH_ALEN];
	struct ibmvnic_query_ip_offload_buffer ip_offload_buf;
	dma_addr_t ip_offload_tok;
	struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
	dma_addr_t ip_offload_ctrl_tok;
	u32 msg_enable;

	/* Vital Product Data (VPD) */
	struct ibmvnic_vpd *vpd;
	char fw_version[32];

	/* Statistics */
	struct ibmvnic_statistics stats;
	dma_addr_t stats_token;
	struct completion stats_done;
	int replenish_no_mem;
	int replenish_add_buff_success;
	int replenish_add_buff_failure;
	int replenish_task_cycles;
	int tx_send_failed;
	int tx_map_failed;

	struct ibmvnic_tx_queue_stats *tx_stats_buffers;
	struct ibmvnic_rx_queue_stats *rx_stats_buffers;

	int phys_link_state;
	int logical_link_state;

	u32 speed;
	u8 duplex;

	/* login data */
	struct ibmvnic_login_buffer *login_buf;
	dma_addr_t login_buf_token;
	int login_buf_sz;

	struct ibmvnic_login_rsp_buffer *login_rsp_buf;
	dma_addr_t login_rsp_buf_token;
	int login_rsp_buf_sz;

	atomic_t running_cap_crqs;

	struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
	struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;

	/* rx structs */
	struct napi_struct *napi;
	struct ibmvnic_rx_pool *rx_pool;
	u64 promisc;

	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_pool *tso_pool;
	struct completion probe_done;
	struct completion init_done;
	int init_done_rc;

	struct completion fw_done;
	/* Used for serialization of device commands */
	struct mutex fw_lock;
	int fw_done_rc;

	struct completion reset_done;
	int reset_done_rc;
	bool wait_for_reset;

	/* partner capabilities */
	u64 min_tx_queues;
	u64 min_rx_queues;
	u64 min_rx_add_queues;
	u64 max_tx_queues;
	u64 max_rx_queues;
	u64 max_rx_add_queues;
	u64 req_tx_queues;
	u64 req_rx_queues;
	u64 req_rx_add_queues;
	u64 min_tx_entries_per_subcrq;
	u64 min_rx_add_entries_per_subcrq;
	u64 max_tx_entries_per_subcrq;
	u64 max_rx_add_entries_per_subcrq;
	u64 req_tx_entries_per_subcrq;
	u64 req_rx_add_entries_per_subcrq;
	u64 tcp_ip_offload;
	u64 promisc_requested;
	u64 promisc_supported;
	u64 min_mtu;
	u64 max_mtu;
	u64 req_mtu;
	u64 prev_mtu;
	u64 max_multicast_filters;
	u64 vlan_header_insertion;
	u64 rx_vlan_header_insertion;
	u64 max_tx_sg_entries;
	u64 rx_sg_supported;
	u64 rx_sg_requested;
	u64 opt_tx_comp_sub_queues;
	u64 opt_rx_comp_queues;
	u64 opt_rx_bufadd_q_per_rx_comp_q;
	u64 opt_tx_entries_per_subcrq;
	u64 opt_rxba_entries_per_subcrq;
	__be64 tx_rx_desc_req;
#define MAX_MAP_ID	255
	DECLARE_BITMAP(map_ids, MAX_MAP_ID);
	u32 num_active_rx_scrqs;
	u32 num_active_rx_pools;
	u32 num_active_rx_napi;
	u32 num_active_tx_scrqs;
	u32 num_active_tx_pools;

	u32 prev_rx_pool_size;
	u32 prev_tx_pool_size;
	u32 cur_rx_buf_sz;
	u32 prev_rx_buf_sz;

	struct tasklet_struct tasklet;
	enum vnic_state state;
	/* Used for serialization of state field. When taking both state
	 * and rwi locks, take state lock first.
	 */
	spinlock_t state_lock;
	enum ibmvnic_reset_reason reset_reason;
	struct list_head rwi_list;
	/* Used for serialization of rwi_list. When taking both state
	 * and rwi locks, take state lock first
	 */
	spinlock_t rwi_lock;
	struct work_struct ibmvnic_reset;
	struct delayed_work ibmvnic_delayed_reset;
	unsigned long resetting;
	/* last device reset time */
	unsigned long last_reset_time;

	bool napi_enabled;
	bool from_passive_init;
	bool login_pending;
	/* protected by rcu */
	bool tx_queues_active;
	bool failover_pending;
	bool force_reset_recovery;

	struct ibmvnic_tunables desired;
	struct ibmvnic_tunables fallback;
};

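/* Illustrative sketch only: a minimal example of building the
 * VERSION_EXCHANGE CRQ from the definitions in this header, along the
 * lines of what would be sent right after the CRQ is initialized. The
 * helper name is hypothetical and is not part of the driver's API.
 */
static inline void ibmvnic_example_build_version_xchg(union ibmvnic_crq *crq)
{
	memset(crq, 0, sizeof(*crq));
	crq->version_exchange.first = IBMVNIC_CRQ_CMD;
	crq->version_exchange.cmd = VERSION_EXCHANGE;
	crq->version_exchange.version = cpu_to_be16(IBMVNIC_INITIAL_VERSION);
}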