// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/err.h>

#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_port.h"
#include "hinic_tx.h"
#include "hinic_rx.h"
#include "hinic_dev.h"
#include "hinic_sriov.h"

MODULE_AUTHOR("Huawei Technologies CO., Ltd");
MODULE_DESCRIPTION("Huawei Intelligent NIC driver");
MODULE_LICENSE("GPL");

static unsigned int tx_weight = 64;
module_param(tx_weight, uint, 0644);
MODULE_PARM_DESC(tx_weight, "Number of Tx packets for NAPI budget (default=64)");

static unsigned int rx_weight = 64;
module_param(rx_weight, uint, 0644);
MODULE_PARM_DESC(rx_weight, "Number of Rx packets for NAPI budget (default=64)");

#define HINIC_DEV_ID_QUAD_PORT_25GE		0x1822
#define HINIC_DEV_ID_DUAL_PORT_100GE		0x0200
#define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ	0x0205
#define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ	0x0210
#define HINIC_DEV_ID_VF				0x375e

#define HINIC_WQ_NAME			"hinic_dev"

#define MSG_ENABLE_DEFAULT		(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
					 NETIF_MSG_IFUP |                  \
					 NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)

#define HINIC_LRO_MAX_WQE_NUM_DEFAULT	8

#define HINIC_LRO_RX_TIMER_DEFAULT	16

#define VLAN_BITMAP_SIZE(nic_dev)	(ALIGN(VLAN_N_VID, 8) / 8)

#define work_to_rx_mode_work(work)	\
		container_of(work, struct hinic_rx_mode_work, work)

#define rx_mode_work_to_nic_dev(rx_mode_work) \
		container_of(rx_mode_work, struct hinic_dev, rx_mode_work)

#define HINIC_WAIT_SRIOV_CFG_TIMEOUT	15000

static int change_mac_addr(struct net_device *netdev, const u8 *addr);

static int set_features(struct hinic_dev *nic_dev,
			netdev_features_t pre_features,
			netdev_features_t features, bool force_change);

static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats;
	struct hinic_rxq_stats rx_stats;

	u64_stats_init(&rx_stats.syncp);

	hinic_rxq_get_stats(rxq, &rx_stats);

	u64_stats_update_begin(&nic_rx_stats->syncp);
	nic_rx_stats->bytes += rx_stats.bytes;
	nic_rx_stats->pkts += rx_stats.pkts;
	nic_rx_stats->errors += rx_stats.errors;
	nic_rx_stats->csum_errors += rx_stats.csum_errors;
	nic_rx_stats->other_errors += rx_stats.other_errors;
	u64_stats_update_end(&nic_rx_stats->syncp);

	hinic_rxq_clean_stats(rxq);
}

static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq)
{
	struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats;
	struct hinic_txq_stats tx_stats;

	u64_stats_init(&tx_stats.syncp);

	hinic_txq_get_stats(txq, &tx_stats);

	u64_stats_update_begin(&nic_tx_stats->syncp);
	nic_tx_stats->bytes += tx_stats.bytes;
	nic_tx_stats->pkts += tx_stats.pkts;
	nic_tx_stats->tx_busy += tx_stats.tx_busy;
	nic_tx_stats->tx_wake += tx_stats.tx_wake;
	nic_tx_stats->tx_dropped += tx_stats.tx_dropped;
	nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts;
	u64_stats_update_end(&nic_tx_stats->syncp);

	hinic_txq_clean_stats(txq);
}

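/* Fold the counters of every Rx and Tx queue into the device-wide totals and
 * reset the per-queue counters. Callers hold nic_dev->mgmt_lock so the
 * accumulated statistics are not updated concurrently.
 */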
static void update_nic_stats(struct hinic_dev *nic_dev)
{
	int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);

	for (i = 0; i < num_qps; i++)
		update_rx_stats(nic_dev, &nic_dev->rxqs[i]);

	for (i = 0; i < num_qps; i++)
		update_tx_stats(nic_dev, &nic_dev->txqs[i]);
}

/**
 * create_txqs - Create the Logical Tx Queues of specific NIC device
 * @nic_dev: the specific NIC device
 *
 * Return 0 - Success, negative - Failure
 **/
static int create_txqs(struct hinic_dev *nic_dev)
{
	int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
	struct net_device *netdev = nic_dev->netdev;
	size_t txq_size;

	if (nic_dev->txqs)
		return -EINVAL;

	txq_size = num_txqs * sizeof(*nic_dev->txqs);
	nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL);
	if (!nic_dev->txqs)
		return -ENOMEM;

	for (i = 0; i < num_txqs; i++) {
		struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i);

		err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to init Txq\n");
			goto err_init_txq;
		}
	}

	return 0;

err_init_txq:
	for (j = 0; j < i; j++)
		hinic_clean_txq(&nic_dev->txqs[j]);

	devm_kfree(&netdev->dev, nic_dev->txqs);
	return err;
}

/**
 * free_txqs - Free the Logical Tx Queues of specific NIC device
 * @nic_dev: the specific NIC device
 **/
static void free_txqs(struct hinic_dev *nic_dev)
{
	int i, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
	struct net_device *netdev = nic_dev->netdev;

	if (!nic_dev->txqs)
		return;

	for (i = 0; i < num_txqs; i++)
		hinic_clean_txq(&nic_dev->txqs[i]);

	devm_kfree(&netdev->dev, nic_dev->txqs);
	nic_dev->txqs = NULL;
}

/**
 * create_rxqs - Create the Logical Rx Queues of specific NIC device
 * @nic_dev: the specific NIC device
 *
 * Return 0 - Success, negative - Failure
 **/
static int create_rxqs(struct hinic_dev *nic_dev)
{
	int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
	struct net_device *netdev = nic_dev->netdev;
	size_t rxq_size;

	if (nic_dev->rxqs)
		return -EINVAL;

	rxq_size = num_rxqs * sizeof(*nic_dev->rxqs);
	nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL);
	if (!nic_dev->rxqs)
		return -ENOMEM;

	for (i = 0; i < num_rxqs; i++) {
		struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i);

		err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to init Rxq\n");
			goto err_init_rxq;
		}
	}

	return 0;

err_init_rxq:
	for (j = 0; j < i; j++)
		hinic_clean_rxq(&nic_dev->rxqs[j]);

	devm_kfree(&netdev->dev, nic_dev->rxqs);
	return err;
}

/**
 * free_rxqs - Free the Logical Rx Queues of specific NIC device
 * @nic_dev: the specific NIC device
 **/
static void free_rxqs(struct hinic_dev *nic_dev)
{
	int i, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
	struct net_device *netdev = nic_dev->netdev;

	if (!nic_dev->rxqs)
		return;

	for (i = 0; i < num_rxqs; i++)
		hinic_clean_rxq(&nic_dev->rxqs[i]);

	devm_kfree(&netdev->dev, nic_dev->rxqs);
	nic_dev->rxqs = NULL;
}

static int hinic_configure_max_qnum(struct hinic_dev *nic_dev)
{
	int err;

	err = hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps);
	if (err)
		return err;

	return 0;
}

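/* Program the RSS template of this function: a default hash key, an
 * indirection table spread over num_rss queues, the enabled hash types and
 * the hash engine, then turn RSS on for the template.
 */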
static int hinic_rss_init(struct hinic_dev *nic_dev)
{
	u8 default_rss_key[HINIC_RSS_KEY_SIZE];
	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
	u32 *indir_tbl;
	int err, i;

	indir_tbl = kcalloc(HINIC_RSS_INDIR_SIZE, sizeof(u32), GFP_KERNEL);
	if (!indir_tbl)
		return -ENOMEM;

	netdev_rss_key_fill(default_rss_key, sizeof(default_rss_key));
	for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
		indir_tbl[i] = ethtool_rxfh_indir_default(i, nic_dev->num_rss);

	err = hinic_rss_set_template_tbl(nic_dev, tmpl_idx, default_rss_key);
	if (err)
		goto out;

	err = hinic_rss_set_indir_tbl(nic_dev, tmpl_idx, indir_tbl);
	if (err)
		goto out;

	err = hinic_set_rss_type(nic_dev, tmpl_idx, nic_dev->rss_type);
	if (err)
		goto out;

	err = hinic_rss_set_hash_engine(nic_dev, tmpl_idx,
					nic_dev->rss_hash_engine);
	if (err)
		goto out;

	err = hinic_rss_cfg(nic_dev, 1, tmpl_idx);
	if (err)
		goto out;

out:
	kfree(indir_tbl);
	return err;
}

static void hinic_rss_deinit(struct hinic_dev *nic_dev)
{
	hinic_rss_cfg(nic_dev, 0, nic_dev->rss_tmpl_idx);
}

static void hinic_init_rss_parameters(struct hinic_dev *nic_dev)
{
	nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR;
	nic_dev->rss_type.tcp_ipv6_ext = 1;
	nic_dev->rss_type.ipv6_ext = 1;
	nic_dev->rss_type.tcp_ipv6 = 1;
	nic_dev->rss_type.ipv6 = 1;
	nic_dev->rss_type.tcp_ipv4 = 1;
	nic_dev->rss_type.ipv4 = 1;
	nic_dev->rss_type.udp_ipv6 = 1;
	nic_dev->rss_type.udp_ipv4 = 1;
}

static void hinic_enable_rss(struct hinic_dev *nic_dev)
{
	struct net_device *netdev = nic_dev->netdev;
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int i, node, err = 0;
	u16 num_cpus = 0;

	if (nic_dev->max_qps <= 1) {
		nic_dev->flags &= ~HINIC_RSS_ENABLE;
		nic_dev->rss_limit = nic_dev->max_qps;
		nic_dev->num_qps = nic_dev->max_qps;
		nic_dev->num_rss = nic_dev->max_qps;

		return;
	}

	err = hinic_rss_template_alloc(nic_dev, &nic_dev->rss_tmpl_idx);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to alloc tmpl_idx for rss, can't enable rss for this function\n");
		nic_dev->flags &= ~HINIC_RSS_ENABLE;
		nic_dev->max_qps = 1;
		nic_dev->rss_limit = nic_dev->max_qps;
		nic_dev->num_qps = nic_dev->max_qps;
		nic_dev->num_rss = nic_dev->max_qps;

		return;
	}

	nic_dev->flags |= HINIC_RSS_ENABLE;

	for (i = 0; i < num_online_cpus(); i++) {
		node = cpu_to_node(i);
		if (node == dev_to_node(&pdev->dev))
			num_cpus++;
	}

	if (!num_cpus)
		num_cpus = num_online_cpus();

	nic_dev->num_qps = hinic_hwdev_num_qps(hwdev);
	nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus);

	nic_dev->rss_limit = nic_dev->num_qps;
	nic_dev->num_rss = nic_dev->num_qps;

	hinic_init_rss_parameters(nic_dev);
	err = hinic_rss_init(nic_dev);
	if (err)
		netif_err(nic_dev, drv, netdev, "Failed to init rss\n");
}

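/**
 * hinic_open - bring the HW interface and the Tx/Rx queues up, enable the port
 * @netdev: network device
 *
 * Return 0 - Success, negative - Failure
 **/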
int hinic_open(struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	enum hinic_port_link_state link_state;
	int err, ret;

	if (!(nic_dev->flags & HINIC_INTF_UP)) {
		err = hinic_hwdev_ifup(nic_dev->hwdev, nic_dev->sq_depth,
				       nic_dev->rq_depth);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed - HW interface up\n");
			return err;
		}
	}

	err = create_txqs(nic_dev);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to create Tx queues\n");
		goto err_create_txqs;
	}

	err = create_rxqs(nic_dev);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to create Rx queues\n");
		goto err_create_rxqs;
	}

	hinic_enable_rss(nic_dev);

	err = hinic_configure_max_qnum(nic_dev);
	if (err) {
		netif_err(nic_dev, drv, nic_dev->netdev,
			  "Failed to configure the maximum number of queues\n");
		goto err_port_state;
	}

	netif_set_real_num_tx_queues(netdev, nic_dev->num_qps);
	netif_set_real_num_rx_queues(netdev, nic_dev->num_qps);

	err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to set port state\n");
		goto err_port_state;
	}

	err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_ENABLE);
	if (err) {
		netif_err(nic_dev, drv, netdev,
			  "Failed to set func port state\n");
		goto err_func_port_state;
	}

	down(&nic_dev->mgmt_lock);

	err = hinic_port_link_state(nic_dev, &link_state);
	if (err) {
		netif_err(nic_dev, drv, netdev, "Failed to get link state\n");
		goto err_port_link;
	}

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_state);

	if (link_state == HINIC_LINK_STATE_UP)
		nic_dev->flags |= HINIC_LINK_UP;

	nic_dev->flags |= HINIC_INTF_UP;

	if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
	    (HINIC_LINK_UP | HINIC_INTF_UP)) {
		netif_info(nic_dev, drv, netdev, "link + intf UP\n");
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
	}

	up(&nic_dev->mgmt_lock);

	netif_info(nic_dev, drv, netdev, "HINIC_INTF is UP\n");
	return 0;

err_port_link:
	up(&nic_dev->mgmt_lock);
	ret = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
	if (ret)
		netif_warn(nic_dev, drv, netdev,
			   "Failed to revert func port state\n");

err_func_port_state:
	ret = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
	if (ret)
		netif_warn(nic_dev, drv, netdev,
			   "Failed to revert port state\n");
err_port_state:
	free_rxqs(nic_dev);
	if (nic_dev->flags & HINIC_RSS_ENABLE) {
		hinic_rss_deinit(nic_dev);
		hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
	}

err_create_rxqs:
	free_txqs(nic_dev);

err_create_txqs:
	if (!(nic_dev->flags & HINIC_INTF_UP))
		hinic_hwdev_ifdown(nic_dev->hwdev);
	return err;
}

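/**
 * hinic_close - take the port down and free the Tx/Rx queue resources
 * @netdev: network device
 *
 * Return 0 - Success
 **/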
int hinic_close(struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	unsigned int flags;

	down(&nic_dev->mgmt_lock);

	flags = nic_dev->flags;
	nic_dev->flags &= ~HINIC_INTF_UP;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	update_nic_stats(nic_dev);

	up(&nic_dev->mgmt_lock);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0);

	hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);

	hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);

	if (nic_dev->flags & HINIC_RSS_ENABLE) {
		hinic_rss_deinit(nic_dev);
		hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx);
	}

	free_rxqs(nic_dev);
	free_txqs(nic_dev);

	if (flags & HINIC_INTF_UP)
		hinic_hwdev_ifdown(nic_dev->hwdev);

	netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n");
	return 0;
}

static int hinic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err;

	netif_info(nic_dev, drv, netdev, "set_mtu = %d\n", new_mtu);

	err = hinic_port_set_mtu(nic_dev, new_mtu);
	if (err)
		netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n");
	else
		netdev->mtu = new_mtu;

	return err;
}

/**
 * change_mac_addr - change the main mac address of network device
 * @netdev: network device
 * @addr: mac address to set
 *
 * Return 0 - Success, negative - Failure
 **/
static int change_mac_addr(struct net_device *netdev, const u8 *addr)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 vid = 0;
	int err;

	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	netif_info(nic_dev, drv, netdev, "change mac addr = %02x %02x %02x %02x %02x %02x\n",
		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	down(&nic_dev->mgmt_lock);

	do {
		err = hinic_port_del_mac(nic_dev, netdev->dev_addr, vid);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to delete mac\n");
			break;
		}

		err = hinic_port_add_mac(nic_dev, addr, vid);
		if (err) {
			netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
			break;
		}

		vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
	} while (vid != VLAN_N_VID);

	up(&nic_dev->mgmt_lock);
	return err;
}

static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
{
	unsigned char new_mac[ETH_ALEN];
	struct sockaddr *saddr = addr;
	int err;

	memcpy(new_mac, saddr->sa_data, ETH_ALEN);

	err = change_mac_addr(netdev, new_mac);
	if (!err)
		memcpy(netdev->dev_addr, new_mac, ETH_ALEN);

	return err;
}

/**
 * add_mac_addr - add mac address to network device
 * @netdev: network device
 * @addr: mac address to add
 *
 * Return 0 - Success, negative - Failure
 **/
static int add_mac_addr(struct net_device *netdev, const u8 *addr)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 vid = 0;
	int err;

	netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n",
		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	down(&nic_dev->mgmt_lock);

	do {
		err = hinic_port_add_mac(nic_dev, addr, vid);
		if (err) {
			netif_err(nic_dev, drv, netdev, "Failed to add mac\n");
			break;
		}

		vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
	} while (vid != VLAN_N_VID);

	up(&nic_dev->mgmt_lock);
	return err;
}

/**
 * remove_mac_addr - remove mac address from network device
 * @netdev: network device
 * @addr: mac address to remove
 *
 * Return 0 - Success, negative - Failure
 **/
static int remove_mac_addr(struct net_device *netdev, const u8 *addr)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 vid = 0;
	int err;

	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	netif_info(nic_dev, drv, netdev, "remove mac addr = %02x %02x %02x %02x %02x %02x\n",
		   addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	down(&nic_dev->mgmt_lock);

	do {
		err = hinic_port_del_mac(nic_dev, addr, vid);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to delete mac\n");
			break;
		}

		vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1);
	} while (vid != VLAN_N_VID);

	up(&nic_dev->mgmt_lock);
	return err;
}

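/* MAC filter entries are programmed per VLAN ID: vlan_bitmap records the VIDs
 * in use so the MAC helpers above can walk all of them when an address is
 * added, removed or changed, while the VLAN callbacks below keep the bitmap
 * and the per-VID MAC entries in sync.
 */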
static int hinic_vlan_rx_add_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int ret, err;

	netif_info(nic_dev, drv, netdev, "add vid = %d\n", vid);

	down(&nic_dev->mgmt_lock);

	err = hinic_port_add_vlan(nic_dev, vid);
	if (err) {
		netif_err(nic_dev, drv, netdev, "Failed to add vlan\n");
		goto err_vlan_add;
	}

	err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid);
	if (err && err != HINIC_PF_SET_VF_ALREADY) {
		netif_err(nic_dev, drv, netdev, "Failed to set mac\n");
		goto err_add_mac;
	}

	bitmap_set(nic_dev->vlan_bitmap, vid, 1);

	up(&nic_dev->mgmt_lock);
	return 0;

err_add_mac:
	ret = hinic_port_del_vlan(nic_dev, vid);
	if (ret)
		netif_err(nic_dev, drv, netdev,
			  "Failed to revert by removing vlan\n");

err_vlan_add:
	up(&nic_dev->mgmt_lock);
	return err;
}

static int hinic_vlan_rx_kill_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err;

	netif_info(nic_dev, drv, netdev, "remove vid = %d\n", vid);

	down(&nic_dev->mgmt_lock);

	err = hinic_port_del_vlan(nic_dev, vid);
	if (err) {
		netif_err(nic_dev, drv, netdev, "Failed to delete vlan\n");
		goto err_del_vlan;
	}

	bitmap_clear(nic_dev->vlan_bitmap, vid, 1);

	up(&nic_dev->mgmt_lock);
	return 0;

err_del_vlan:
	up(&nic_dev->mgmt_lock);
	return err;
}

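/* Programming the Rx mode and syncing the unicast/multicast filters requires
 * sending management commands that may sleep, while ndo_set_rx_mode is called
 * in atomic context; hinic_set_rx_mode() therefore only records the requested
 * mode and queues this handler to do the actual work.
 */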
static void set_rx_mode(struct work_struct *work)
{
	struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
	struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);

	hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode);

	__dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
	__dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
}

static void hinic_set_rx_mode(struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_rx_mode_work *rx_mode_work;
	u32 rx_mode;

	rx_mode_work = &nic_dev->rx_mode_work;

	rx_mode = HINIC_RX_MODE_UC |
		  HINIC_RX_MODE_MC |
		  HINIC_RX_MODE_BC;

	if (netdev->flags & IFF_PROMISC) {
		if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
			rx_mode |= HINIC_RX_MODE_PROMISC;
	} else if (netdev->flags & IFF_ALLMULTI) {
		rx_mode |= HINIC_RX_MODE_MC_ALL;
	}

	rx_mode_work->rx_mode = rx_mode;

	queue_work(nic_dev->workq, &rx_mode_work->work);
}

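/* On a Tx watchdog timeout, dump the software producer/consumer indices and
 * the hardware consumer index of every stalled send queue together with its
 * NAPI state to help locate where the queue got stuck.
 */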
static void hinic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 sw_pi, hw_ci, sw_ci;
	struct hinic_sq *sq;
	u16 num_sqs, q_id;

	num_sqs = hinic_hwdev_num_qps(nic_dev->hwdev);

	netif_err(nic_dev, drv, netdev, "Tx timeout\n");

	for (q_id = 0; q_id < num_sqs; q_id++) {
		if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))
			continue;

		sq = hinic_hwdev_get_sq(nic_dev->hwdev, q_id);
		sw_pi = atomic_read(&sq->wq->prod_idx) & sq->wq->mask;
		hw_ci = be16_to_cpu(*(u16 *)(sq->hw_ci_addr)) & sq->wq->mask;
		sw_ci = atomic_read(&sq->wq->cons_idx) & sq->wq->mask;
		netif_err(nic_dev, drv, netdev, "Txq%d: sw_pi: %d, hw_ci: %d, sw_ci: %d, napi->state: 0x%lx\n",
			  q_id, sw_pi, hw_ci, sw_ci,
			  nic_dev->txqs[q_id].napi.state);
	}
}

static void hinic_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_rxq_stats *nic_rx_stats;
	struct hinic_txq_stats *nic_tx_stats;

	nic_rx_stats = &nic_dev->rx_stats;
	nic_tx_stats = &nic_dev->tx_stats;

	down(&nic_dev->mgmt_lock);

	if (nic_dev->flags & HINIC_INTF_UP)
		update_nic_stats(nic_dev);

	up(&nic_dev->mgmt_lock);

	stats->rx_bytes = nic_rx_stats->bytes;
	stats->rx_packets = nic_rx_stats->pkts;
	stats->rx_errors = nic_rx_stats->errors;

	stats->tx_bytes = nic_tx_stats->bytes;
	stats->tx_packets = nic_tx_stats->pkts;
	stats->tx_errors = nic_tx_stats->tx_dropped;
}

static int hinic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	return set_features(nic_dev, nic_dev->netdev->features,
			    features, false);
}

static netdev_features_t hinic_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	/* If Rx checksum is disabled, then LRO should also be disabled */
	if (!(features & NETIF_F_RXCSUM)) {
		netif_info(nic_dev, drv, netdev, "disabling LRO as RXCSUM is off\n");
		features &= ~NETIF_F_LRO;
	}

	return features;
}

static const struct net_device_ops hinic_netdev_ops = {
	.ndo_open = hinic_open,
	.ndo_stop = hinic_close,
	.ndo_change_mtu = hinic_change_mtu,
	.ndo_set_mac_address = hinic_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
	.ndo_set_rx_mode = hinic_set_rx_mode,
	.ndo_start_xmit = hinic_xmit_frame,
	.ndo_tx_timeout = hinic_tx_timeout,
	.ndo_get_stats64 = hinic_get_stats64,
	.ndo_fix_features = hinic_fix_features,
	.ndo_set_features = hinic_set_features,
	.ndo_set_vf_mac = hinic_ndo_set_vf_mac,
	.ndo_set_vf_vlan = hinic_ndo_set_vf_vlan,
	.ndo_get_vf_config = hinic_ndo_get_vf_config,
	.ndo_set_vf_trust = hinic_ndo_set_vf_trust,
	.ndo_set_vf_rate = hinic_ndo_set_vf_bw,
	.ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk,
	.ndo_set_vf_link_state = hinic_ndo_set_vf_link_state,
};

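/* The VF variant of the netdev ops omits the .ndo_set_vf_* callbacks, which
 * are only meaningful on the PF.
 */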
static const struct net_device_ops hinicvf_netdev_ops = {
	.ndo_open = hinic_open,
	.ndo_stop = hinic_close,
	.ndo_change_mtu = hinic_change_mtu,
	.ndo_set_mac_address = hinic_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid,
	.ndo_set_rx_mode = hinic_set_rx_mode,
	.ndo_start_xmit = hinic_xmit_frame,
	.ndo_tx_timeout = hinic_tx_timeout,
	.ndo_get_stats64 = hinic_get_stats64,
	.ndo_fix_features = hinic_fix_features,
	.ndo_set_features = hinic_set_features,
};

static void netdev_features_init(struct net_device *netdev)
{
	netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_RXCSUM | NETIF_F_LRO |
			      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	netdev->vlan_features = netdev->hw_features;

	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
}

/**
 * link_status_event_handler - link event handler
 * @handle: nic device for the handler
 * @buf_in: input buffer
 * @in_size: input size
 * @buf_out: output buffer
 * @out_size: returned output size
 **/
static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
				      void *buf_out, u16 *out_size)
{
	struct hinic_port_link_status *link_status, *ret_link_status;
	struct hinic_dev *nic_dev = handle;

	link_status = buf_in;

	if (link_status->link == HINIC_LINK_STATE_UP) {
		down(&nic_dev->mgmt_lock);

		nic_dev->flags |= HINIC_LINK_UP;

		if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) ==
		    (HINIC_LINK_UP | HINIC_INTF_UP)) {
			netif_carrier_on(nic_dev->netdev);
			netif_tx_wake_all_queues(nic_dev->netdev);
		}

		up(&nic_dev->mgmt_lock);

		netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n");
	} else {
		down(&nic_dev->mgmt_lock);

		nic_dev->flags &= ~HINIC_LINK_UP;

		netif_carrier_off(nic_dev->netdev);
		netif_tx_disable(nic_dev->netdev);

		up(&nic_dev->mgmt_lock);

		netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n");
	}

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_notify_all_vfs_link_changed(nic_dev->hwdev,
						  link_status->link);

	ret_link_status = buf_out;
	ret_link_status->status = 0;

	*out_size = sizeof(*ret_link_status);
}

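/* Push the hardware offload state (TSO, Rx checksum, LRO, Rx VLAN stripping)
 * to the device. With force_change set (used once at init time) every offload
 * is programmed regardless of the previous feature flags; otherwise only the
 * bits that changed are touched. Note that the return value reflects only the
 * last command issued.
 */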
static int set_features(struct hinic_dev *nic_dev,
			netdev_features_t pre_features,
			netdev_features_t features, bool force_change)
{
	netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
	u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
	int err = 0;

	if (changed & NETIF_F_TSO)
		err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
					 HINIC_TSO_ENABLE : HINIC_TSO_DISABLE);

	if (changed & NETIF_F_RXCSUM)
		err = hinic_set_rx_csum_offload(nic_dev, csum_en);

	if (changed & NETIF_F_LRO) {
		err = hinic_set_rx_lro_state(nic_dev,
					     !!(features & NETIF_F_LRO),
					     HINIC_LRO_RX_TIMER_DEFAULT,
					     HINIC_LRO_MAX_WQE_NUM_DEFAULT);
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		err = hinic_set_rx_vlan_offload(nic_dev,
						!!(features &
						   NETIF_F_HW_VLAN_CTAG_RX));

	return err;
}

/**
 * nic_dev_init - Initialize the NIC device
 * @pdev: the NIC pci device
 *
 * Return 0 - Success, negative - Failure
 **/
static int nic_dev_init(struct pci_dev *pdev)
{
	struct hinic_rx_mode_work *rx_mode_work;
	struct hinic_txq_stats *tx_stats;
	struct hinic_rxq_stats *rx_stats;
	struct hinic_dev *nic_dev;
	struct net_device *netdev;
	struct hinic_hwdev *hwdev;
	int err, num_qps;

	hwdev = hinic_init_hwdev(pdev);
	if (IS_ERR(hwdev)) {
		dev_err(&pdev->dev, "Failed to initialize HW device\n");
		return PTR_ERR(hwdev);
	}

	num_qps = hinic_hwdev_num_qps(hwdev);
	if (num_qps <= 0) {
		dev_err(&pdev->dev, "Invalid number of QPS\n");
		err = -EINVAL;
		goto err_num_qps;
	}

	netdev = alloc_etherdev_mq(sizeof(*nic_dev), num_qps);
	if (!netdev) {
		dev_err(&pdev->dev, "Failed to allocate Ethernet device\n");
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	hinic_set_ethtool_ops(netdev);

	if (!HINIC_IS_VF(hwdev->hwif))
		netdev->netdev_ops = &hinic_netdev_ops;
	else
		netdev->netdev_ops = &hinicvf_netdev_ops;

	netdev->max_mtu = ETH_MAX_MTU;

	nic_dev = netdev_priv(netdev);
	nic_dev->netdev = netdev;
	nic_dev->hwdev = hwdev;
	nic_dev->msg_enable = MSG_ENABLE_DEFAULT;
	nic_dev->flags = 0;
	nic_dev->txqs = NULL;
	nic_dev->rxqs = NULL;
	nic_dev->tx_weight = tx_weight;
	nic_dev->rx_weight = rx_weight;
	nic_dev->sq_depth = HINIC_SQ_DEPTH;
	nic_dev->rq_depth = HINIC_RQ_DEPTH;
	nic_dev->sriov_info.hwdev = hwdev;
	nic_dev->sriov_info.pdev = pdev;
	nic_dev->max_qps = num_qps;

	sema_init(&nic_dev->mgmt_lock, 1);

	tx_stats = &nic_dev->tx_stats;
	rx_stats = &nic_dev->rx_stats;

	u64_stats_init(&tx_stats->syncp);
	u64_stats_init(&rx_stats->syncp);

	nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev,
					    VLAN_BITMAP_SIZE(nic_dev),
					    GFP_KERNEL);
	if (!nic_dev->vlan_bitmap) {
		err = -ENOMEM;
		goto err_vlan_bitmap;
	}

	nic_dev->workq = create_singlethread_workqueue(HINIC_WQ_NAME);
	if (!nic_dev->workq) {
		err = -ENOMEM;
		goto err_workq;
	}

	pci_set_drvdata(pdev, netdev);

	err = hinic_port_get_mac(nic_dev, netdev->dev_addr);
	if (err) {
		dev_err(&pdev->dev, "Failed to get mac address\n");
		goto err_get_mac;
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
			dev_err(&pdev->dev, "Invalid MAC address\n");
			err = -EIO;
			goto err_add_mac;
		}

		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
			 netdev->dev_addr);
		eth_hw_addr_random(netdev);
	}

	err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0);
	if (err && err != HINIC_PF_SET_VF_ALREADY) {
		dev_err(&pdev->dev, "Failed to add mac\n");
		goto err_add_mac;
	}

	err = hinic_port_set_mtu(nic_dev, netdev->mtu);
	if (err) {
		dev_err(&pdev->dev, "Failed to set mtu\n");
		goto err_set_mtu;
	}

	rx_mode_work = &nic_dev->rx_mode_work;
	INIT_WORK(&rx_mode_work->work, set_rx_mode);

	netdev_features_init(netdev);

	netif_carrier_off(netdev);

	hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
				nic_dev, link_status_event_handler);

	err = set_features(nic_dev, 0, nic_dev->netdev->features, true);
	if (err)
		goto err_set_features;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to register netdev\n");
		goto err_reg_netdev;
	}

	return 0;

err_reg_netdev:
err_set_features:
	hinic_hwdev_cb_unregister(nic_dev->hwdev,
				  HINIC_MGMT_MSG_CMD_LINK_STATUS);
	cancel_work_sync(&rx_mode_work->work);

err_set_mtu:
err_get_mac:
err_add_mac:
	pci_set_drvdata(pdev, NULL);
	destroy_workqueue(nic_dev->workq);

err_workq:
err_vlan_bitmap:
	free_netdev(netdev);

err_alloc_etherdev:
err_num_qps:
	hinic_free_hwdev(hwdev);
	return err;
}

static int hinic_probe(struct pci_dev *pdev,
		       const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, HINIC_DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Failed to request PCI regions\n");
		goto err_pci_regions;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Failed to set DMA mask\n");
			goto err_dma_mask;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Couldn't set 64-bit consistent DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Failed to set consistent DMA mask\n");
			goto err_dma_consistent_mask;
		}
	}

	err = nic_dev_init(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize NIC device\n");
		goto err_nic_dev_init;
	}

	dev_info(&pdev->dev, "HiNIC driver - probed\n");
	return 0;

err_nic_dev_init:
err_dma_consistent_mask:
err_dma_mask:
	pci_release_regions(pdev);

err_pci_regions:
	pci_disable_device(pdev);
	return err;
}

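/* Mark the function as being removed and poll until no SR-IOV enable or
 * disable operation is still in flight, giving up after
 * HINIC_WAIT_SRIOV_CFG_TIMEOUT iterations of roughly 10ms each.
 */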
static void wait_sriov_cfg_complete(struct hinic_dev *nic_dev)
{
	struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info;
	u32 loop_cnt = 0;

	set_bit(HINIC_FUNC_REMOVE, &sriov_info->state);
	usleep_range(9900, 10000);

	while (loop_cnt < HINIC_WAIT_SRIOV_CFG_TIMEOUT) {
		if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) &&
		    !test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state))
			return;

		usleep_range(9900, 10000);
		loop_cnt++;
	}
}

static void hinic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_rx_mode_work *rx_mode_work;

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
		wait_sriov_cfg_complete(nic_dev);
		hinic_pci_sriov_disable(pdev);
	}

	unregister_netdev(netdev);

	hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);

	hinic_hwdev_cb_unregister(nic_dev->hwdev,
				  HINIC_MGMT_MSG_CMD_LINK_STATUS);

	rx_mode_work = &nic_dev->rx_mode_work;
	cancel_work_sync(&rx_mode_work->work);

	pci_set_drvdata(pdev, NULL);

	destroy_workqueue(nic_dev->workq);

	hinic_vf_func_free(nic_dev->hwdev);

	hinic_free_hwdev(nic_dev->hwdev);

	free_netdev(netdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	dev_info(&pdev->dev, "HiNIC driver - removed\n");
}

static void hinic_shutdown(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static const struct pci_device_id hinic_pci_table[] = {
	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0},
	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0},
	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0},
	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0},
	{ PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_VF), 0},
	{ 0, 0}
};
MODULE_DEVICE_TABLE(pci, hinic_pci_table);

static struct pci_driver hinic_driver = {
	.name = HINIC_DRV_NAME,
	.id_table = hinic_pci_table,
	.probe = hinic_probe,
	.remove = hinic_remove,
	.shutdown = hinic_shutdown,
	.sriov_configure = hinic_pci_sriov_configure,
};

module_pci_driver(hinic_driver);