/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"

/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);

static int debug = 0x00;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
		 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");

static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
					  struct sk_buff *skb)
{
	if (skb->len <= 64)
		nic->drv_stats.rx_frames_64++;
	else if (skb->len <= 127)
		nic->drv_stats.rx_frames_127++;
	else if (skb->len <= 255)
		nic->drv_stats.rx_frames_255++;
	else if (skb->len <= 511)
		nic->drv_stats.rx_frames_511++;
	else if (skb->len <= 1023)
		nic->drv_stats.rx_frames_1023++;
	else if (skb->len <= 1518)
		nic->drv_stats.rx_frames_1518++;
	else
		nic->drv_stats.rx_frames_jumbo++;
}

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * an explicit ordering operation which in this case is redundant and only
 * adds overhead.
 */

/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
{
	void __iomem *addr = nic->reg_base + offset;

	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
	void __iomem *addr = nic->reg_base + offset;

	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
}

/* VF -> PF mailbox communication */

static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT;
	int sleep = 10;

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for previous message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked)
			return -EINVAL;
		msleep(sleep);
		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "PF didn't ack to mbox msg %d from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EBUSY;
		}
	}
	return 0;
}
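
/* All VF -> PF configuration requests follow the same pattern: zero-fill a
 * union nic_mbx on the stack, set the message type and payload, and call
 * nicvf_send_msg_to_pf(), which blocks until the PF acks (0), nacks (-EINVAL)
 * or the wait times out (-EBUSY). A minimal sketch, mirroring
 * nicvf_update_hw_max_frs() later in this file:
 *
 *	union nic_mbx mbx = {};
 *
 *	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
 *	mbx.frs.max_frs = mtu;
 *	mbx.frs.vf_id = nic->vf_id;
 *	return nicvf_send_msg_to_pf(nic, &mbx);
 */
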
/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated with.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
	int timeout = 5000, sleep = 20;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;

	nic->pf_ready_to_rcv_msg = false;

	nicvf_write_to_mbx(nic, &mbx);

	while (!nic->pf_ready_to_rcv_msg) {
		msleep(sleep);
		if (nic->pf_ready_to_rcv_msg)
			break;
		timeout -= sleep;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "PF didn't respond to READY msg\n");
			return 0;
		}
	}
	return 1;
}

static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}

static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_ready_to_rcv_msg = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			ether_addr_copy(nic->netdev->dev_addr,
					mbx.nic_cfg.mac_addr);
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = true;
		nic->bgx_stats_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		if (nic->link_up) {
			netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
				    nic->netdev->name, nic->speed,
				    nic->duplex == DUPLEX_FULL ?
				    "Full duplex" : "Half duplex");
			netif_carrier_on(nic->netdev);
			netif_tx_start_all_queues(nic->netdev);
		} else {
			netdev_info(nic->netdev, "%s: Link is Down\n",
				    nic->netdev->name);
			netif_carrier_off(nic->netdev);
			netif_tx_stop_all_queues(nic->netdev);
		}
		break;
	default:
		netdev_err(nic->netdev,
			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}
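
/* Push the RSS indirection table to the PF. The table can be larger than a
 * single mailbox message, so it is sent in chunks of
 * RSS_IND_TBL_LEN_PER_MBX_MSG entries: the first chunk uses
 * NIC_MBOX_MSG_RSS_CFG and the remaining chunks NIC_MBOX_MSG_RSS_CFG_CONT.
 */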
void nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss = &nic->rss_info;
	int ind_tbl_len = rss->rss_size;
	int i, nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = min(ind_tbl_len,
					  RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
			  NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}

void nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
	int idx;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(u64);
	}
}

static int nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	nicvf_get_rss_size(nic);

	if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) {
		rss->enable = false;
		rss->hash_bits = 0;
		return 0;
	}

	rss->enable = true;

	/* Using the HW reset value for now */
	rss->key[0] = 0xFEED0BADFEED0BADULL;
	rss->key[1] = 0xFEED0BADFEED0BADULL;
	rss->key[2] = 0xFEED0BADFEED0BADULL;
	rss->key[3] = 0xFEED0BADFEED0BADULL;
	rss->key[4] = 0xFEED0BADFEED0BADULL;

	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

	rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
							       nic->qs->rq_cnt);
	nicvf_config_rss(nic);
	return 1;
}

int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues)
{
	int err = 0;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}

static int nicvf_init_resources(struct nicvf *nic)
{
	int err;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;

	/* Enable Qset */
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);
	if (err) {
		netdev_err(nic->netdev,
			   "Failed to alloc/config VF's QSet resources\n");
		return err;
	}

	/* Send VF config done msg to PF */
	nicvf_write_to_mbx(nic, &mbx);

	return 0;
}

static void nicvf_snd_pkt_handler(struct net_device *netdev,
				  struct cmp_queue *cq,
				  struct cqe_send_t *cqe_tx, int cqe_type)
{
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;

	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	netdev_dbg(nic->netdev,
		   "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
		   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
		   cqe_tx->sqe_ptr, hdr->subdesc_cnt);

	nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
	/* For TSO offloaded packets only one head SKB needs to be freed */
	if (skb) {
		prefetch(skb);
		dev_consume_skb_any(skb);
		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
	}
}
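
/* Handle a CQE_TYPE_RX completion: drop errored completions that carry no
 * receive buffers, build an sk_buff from the receive buffers, optionally
 * dump the frame when pktdata debugging is enabled, mark the checksum as
 * already verified by hardware when RX checksum offload is available, and
 * hand the packet to GRO (or netif_receive_skb() when called outside NAPI
 * context).
 */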
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
				  struct napi_struct *napi,
				  struct cmp_queue *cq,
				  struct cqe_rx_t *cqe_rx, int cqe_type)
{
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	int err = 0;

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
	if (err && !cqe_rx->rb_cnt)
		return;

	skb = nicvf_get_rcv_skb(nic, cqe_rx);
	if (!skb) {
		netdev_dbg(nic->netdev, "Packet not received\n");
		return;
	}

	if (netif_msg_pktdata(nic)) {
		netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
			    skb, skb->len);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);
	}

	nicvf_set_rx_frame_cnt(nic, skb);

	skb_record_rx_queue(skb, cqe_rx->rq_idx);
	if (netdev->hw_features & NETIF_F_RXCSUM) {
		/* HW by default verifies TCP/UDP/SCTP checksums */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (napi && (netdev->features & NETIF_F_GRO))
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
}
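
/* Process completions on CQ 'cq_idx': receive completions are handled up to
 * the NAPI budget, send completions are always drained, and the CQ doorbell
 * is rung so the hardware can reuse the processed CQEs. If any send
 * completions were seen, a transmit queue stopped earlier for lack of SQ
 * descriptors is woken up. Returns the number of packets received.
 */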
static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
				 struct napi_struct *napi, int budget)
{
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;
	struct netdev_queue *txq;

	spin_lock_bh(&cq->lock);
loop:
	processed_cqe = 0;
	/* Get no of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (!cqe_count)
		goto done;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
		   __func__, cq_idx, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		if ((work_done >= budget) && napi &&
		    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
			break;
		}

		netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
			   cq_idx, cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			nicvf_rcv_pkt_handler(netdev, napi, cq,
					      cq_desc, CQE_TYPE_RX);
			work_done++;
			break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(netdev, cq,
					      (void *)cq_desc, CQE_TYPE_SEND);
			tx_done++;
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}
	netdev_dbg(nic->netdev,
		   "%s CQ%d processed_cqe %d work_done %d budget %d\n",
		   __func__, cq_idx, processed_cqe, work_done, budget);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_idx, processed_cqe);

	if ((work_done < budget) && napi)
		goto loop;

done:
	/* Wake up TXQ if it was stopped earlier due to SQ full */
	if (tx_done) {
		txq = netdev_get_tx_queue(netdev, cq_idx);
		if (netif_tx_queue_stopped(txq)) {
			netif_tx_start_queue(txq);
			nic->drv_stats.txq_wake++;
			if (netif_msg_tx_err(nic))
				netdev_warn(netdev,
					    "%s: Transmit queue wakeup SQ%d\n",
					    netdev->name, cq_idx);
		}
	}

	spin_unlock_bh(&cq->lock);
	return work_done;
}

static int nicvf_poll(struct napi_struct *napi, int budget)
{
	u64 cq_head;
	int work_done = 0;
	struct net_device *netdev = napi->dev;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_cq_poll *cq;

	cq = container_of(napi, struct nicvf_cq_poll, napi);
	work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

	if (work_done < budget) {
		/* Slow packet rate, exit polling */
		napi_complete(napi);
		/* Re-enable interrupts */
		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
					       cq->cq_idx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
				      cq->cq_idx, cq_head);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
	}
	return work_done;
}

/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void nicvf_handle_qs_err(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	netif_tx_disable(nic->netdev);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	netif_tx_start_all_queues(nic->netdev);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u64 intr;

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return IRQ_HANDLED;

	nicvf_handle_mbx_intr(nic);

	return IRQ_HANDLED;
}
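
/* Main MSI-X interrupt handler for all vectors except the mailbox one. It
 * demultiplexes NIC_VF_INT: a Qset error disables that interrupt and kicks
 * the qs_err tasklet, CQ interrupts are disabled and the corresponding NAPI
 * context is scheduled, and RBDR interrupts are disabled and the RBDR refill
 * tasklet is scheduled. The handled bits are then written back to NIC_VF_INT
 * to clear them.
 */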
static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
{
	u64 qidx, intr, clear_intr = 0;
	u64 cq_intr, rbdr_intr, qs_err_intr;
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	if (netif_msg_intr(nic))
		netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
			    nic->netdev->name, intr);

	qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
	if (qs_err_intr) {
		/* Disable Qset err interrupt and schedule softirq */
		nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
		tasklet_hi_schedule(&nic->qs_err_task);
		clear_intr |= qs_err_intr;
	}

	/* Disable interrupts and start polling */
	cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (!(cq_intr & (1 << qidx)))
			continue;
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
			continue;

		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);

		cq_poll = nic->napi[qidx];
		/* Schedule NAPI */
		if (cq_poll)
			napi_schedule(&cq_poll->napi);
	}

	/* Handle RBDR interrupts */
	rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
	if (rbdr_intr) {
		/* Disable RBDR interrupt and schedule softirq */
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
			if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
				continue;
			nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
			tasklet_hi_schedule(&nic->rbdr_task);
			clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
		}
	}

	/* Clear interrupts */
	nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
	return IRQ_HANDLED;
}

static int nicvf_enable_msix(struct nicvf *nic)
{
	int ret, vec;

	nic->num_vec = NIC_VF_MSIX_VECTORS;

	for (vec = 0; vec < nic->num_vec; vec++)
		nic->msix_entries[vec].entry = vec;

	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
	if (ret) {
		netdev_err(nic->netdev,
			   "Req for #%d msix vectors failed\n", nic->num_vec);
		return 0;
	}
	nic->msix_enabled = 1;
	return 1;
}

static void nicvf_disable_msix(struct nicvf *nic)
{
	if (nic->msix_enabled) {
		pci_disable_msix(nic->pdev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}

static int nicvf_register_interrupts(struct nicvf *nic)
{
	int irq, free, ret = 0;
	int vector;

	for_each_cq_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
			nic->vf_id, irq);

	for_each_sq_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
			nic->vf_id, irq - NICVF_INTR_ID_SQ);

	for_each_rbdr_irq(irq)
		sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
			nic->vf_id, irq - NICVF_INTR_ID_RBDR);

	/* Register all interrupts except mailbox */
	for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			break;
		nic->irq_allocated[irq] = true;
	}

	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) {
		vector = nic->msix_entries[irq].vector;
		ret = request_irq(vector, nicvf_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			break;
		nic->irq_allocated[irq] = true;
	}

	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
		"NICVF%d Qset error", nic->vf_id);
	if (!ret) {
		vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
		irq = NICVF_INTR_ID_QS_ERR;
		ret = request_irq(vector, nicvf_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (!ret)
			nic->irq_allocated[irq] = true;
	}

	if (ret) {
		netdev_err(nic->netdev, "Request irq failed\n");
		for (free = 0; free < irq; free++)
			free_irq(nic->msix_entries[free].vector, nic);
		return ret;
	}

	return 0;
}

static void nicvf_unregister_interrupts(struct nicvf *nic)
{
	int irq;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		if (nic->irq_allocated[irq])
			free_irq(nic->msix_entries[irq].vector, nic);
		nic->irq_allocated[irq] = false;
	}

	/* Disable MSI-X */
	nicvf_disable_msix(nic);
}

/* Initialize MSIX vectors and register MISC interrupt.
 * Send READY message to PF to check if it is alive
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
{
	int ret = 0;
	int irq = NICVF_INTR_ID_MISC;

	/* Return if mailbox interrupt is already registered */
	if (nic->msix_enabled)
		return 0;

	/* Enable MSI-X */
	if (!nicvf_enable_msix(nic))
		return 1;

	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
	/* Register Misc interrupt */
	ret = request_irq(nic->msix_entries[irq].vector,
			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);

	if (ret)
		return ret;
	nic->irq_allocated[irq] = true;

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		nicvf_unregister_interrupts(nic);
		return 1;
	}

	return 0;
}

static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qid = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);

	/* Check for minimum packet length */
	if (skb->len <= ETH_HLEN) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
		netif_tx_stop_queue(txq);
		nic->drv_stats.txq_stop++;
		if (netif_msg_tx_err(nic))
			netdev_warn(netdev,
				    "%s: Transmit ring full, stopping SQ%d\n",
				    netdev->name, qid);

		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

int nicvf_stop(struct net_device *netdev)
{
	int irq, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	netif_carrier_off(netdev);

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Wait for pending IRQ handlers to finish */
	for (irq = 0; irq < nic->num_vec; irq++)
		synchronize_irq(nic->msix_entries[irq].vector);

	tasklet_kill(&nic->rbdr_task);
	tasklet_kill(&nic->qs_err_task);
	if (nic->rb_work_scheduled)
		cancel_delayed_work_sync(&nic->rbdr_work);

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		nic->napi[qidx] = NULL;
		napi_synchronize(&cq_poll->napi);
		/* CQ intr is re-enabled after napi_complete() in nicvf_poll(),
		 * so disable it now
		 */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
		kfree(cq_poll);
	}

	netif_tx_disable(netdev);

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	/* disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	nicvf_unregister_interrupts(nic);

	return 0;
}
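
/* Bring the interface up: (re)register the mailbox interrupt, add and enable
 * one NAPI context per completion queue, program the MAC address (a random
 * one if the PF did not supply any, or a deferred user-requested one), set up
 * the Qset-error and RBDR tasklets, configure CPI and RSS, request the
 * remaining MSI-X vectors, initialize the queues, and finally enable the
 * Qset-error, CQ and RBDR interrupts before starting the transmit queues.
 */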
int nicvf_open(struct net_device *netdev)
{
	int err, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;

	nic->mtu = netdev->mtu;

	netif_carrier_off(netdev);

	err = nicvf_register_misc_interrupt(nic);
	if (err)
		return err;

	/* Register NAPI handler for processing CQEs */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
		if (!cq_poll) {
			err = -ENOMEM;
			goto napi_del;
		}
		cq_poll->cq_idx = qidx;
		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
		nic->napi[qidx] = cq_poll;
	}

	/* Check if we got MAC address from PF or else generate a random MAC */
	if (is_zero_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	if (nic->set_mac_pending) {
		nic->set_mac_pending = false;
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	/* Init tasklet for handling Qset err interrupt */
	tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
		     (unsigned long)nic);

	/* Init RBDR tasklet which will refill RBDR */
	tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
		     (unsigned long)nic);
	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

	/* Configure CPI algorithm */
	nic->cpi_alg = cpi_alg;
	nicvf_config_cpi(nic);

	/* Configure receive side scaling */
	nicvf_rss_init(nic);

	err = nicvf_register_interrupts(nic);
	if (err)
		goto cleanup;

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err)
		goto cleanup;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, -1);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	nic->drv_stats.txq_stop = 0;
	nic->drv_stats.txq_wake = 0;

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);

	return 0;
cleanup:
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
	nicvf_unregister_interrupts(nic);
napi_del:
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
		kfree(cq_poll);
		nic->napi[qidx] = NULL;
	}
	return err;
}

static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nicvf *nic = netdev_priv(netdev);

	if (new_mtu > NIC_HW_MAX_FRS)
		return -EINVAL;

	if (new_mtu < NIC_HW_MIN_FRS)
		return -EINVAL;

	if (nicvf_update_hw_max_frs(nic, new_mtu))
		return -EINVAL;
	netdev->mtu = new_mtu;
	nic->mtu = new_mtu;

	return 0;
}
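
/* Set a new MAC address. If MSI-X (and hence the mailbox interrupt) is
 * already enabled the address is pushed to the PF immediately; otherwise the
 * request is recorded in set_mac_pending and applied in nicvf_open().
 */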
static int nicvf_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct nicvf *nic = netdev_priv(netdev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	if (nic->msix_enabled) {
		if (nicvf_hw_set_mac_addr(nic, netdev))
			return -EBUSY;
	} else {
		nic->set_mac_pending = true;
	}

	return 0;
}
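
/* Refresh the BGX (LMAC) statistics. Each Rx and Tx statistic index is
 * requested from the PF with a separate NIC_MBOX_MSG_BGX_STATS message, and
 * the reply is waited for via bgx_stats_acked for at most 10 * 2 ms per
 * index.
 */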
void nicvf_update_lmac_stats(struct nicvf *nic)
{
	int stat = 0;
	union nic_mbx mbx = {};
	int timeout;

	if (!netif_running(nic->netdev))
		return;

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = nic->vf_id;
	/* Rx stats */
	mbx.bgx_stats.rx = 1;
	while (stat < BGX_RX_STATS_COUNT) {
		nic->bgx_stats_acked = 0;
		mbx.bgx_stats.idx = stat;
		nicvf_send_msg_to_pf(nic, &mbx);
		timeout = 0;
		while ((!nic->bgx_stats_acked) && (timeout < 10)) {
			msleep(2);
			timeout++;
		}
		stat++;
	}

	stat = 0;

	/* Tx stats */
	mbx.bgx_stats.rx = 0;
	while (stat < BGX_TX_STATS_COUNT) {
		nic->bgx_stats_acked = 0;
		mbx.bgx_stats.idx = stat;
		nicvf_send_msg_to_pf(nic, &mbx);
		timeout = 0;
		while ((!nic->bgx_stats_acked) && (timeout < 10)) {
			msleep(2);
			timeout++;
		}
		stat++;
	}
}

void nicvf_update_stats(struct nicvf *nic)
{
	int qidx;
	struct nicvf_hw_stats *stats = &nic->stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
	struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))

	stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok +
				  stats->rx_bcast_frames_ok +
				  stats->rx_mcast_frames_ok;
	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
				  stats->tx_bcast_frames_ok +
				  stats->tx_mcast_frames_ok;
	drv_stats->rx_drops = stats->rx_drop_red +
			      stats->rx_drop_overrun;
	drv_stats->tx_drops = stats->tx_drops;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}

static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
					    struct rtnl_link_stats64 *stats)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_hw_stats *hw_stats = &nic->stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;

	nicvf_update_stats(nic);

	stats->rx_bytes = hw_stats->rx_bytes_ok;
	stats->rx_packets = drv_stats->rx_frames_ok;
	stats->rx_dropped = drv_stats->rx_drops;

	stats->tx_bytes = hw_stats->tx_bytes_ok;
	stats->tx_packets = drv_stats->tx_frames_ok;
	stats->tx_dropped = drv_stats->tx_drops;

	return stats;
}

static void nicvf_tx_timeout(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	if (netif_msg_tx_err(nic))
		netdev_warn(dev, "%s: Transmit timed out, resetting\n",
			    dev->name);

	schedule_work(&nic->reset_task);
}

static void nicvf_reset_task(struct work_struct *work)
{
	struct nicvf *nic;

	nic = container_of(work, struct nicvf, reset_task);

	if (!netif_running(nic->netdev))
		return;

	nicvf_stop(nic->netdev);
	nicvf_open(nic->netdev);
	nic->netdev->trans_start = jiffies;
}

static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_open		= nicvf_open,
	.ndo_stop		= nicvf_stop,
	.ndo_start_xmit		= nicvf_xmit,
	.ndo_change_mtu		= nicvf_change_mtu,
	.ndo_set_mac_address	= nicvf_set_mac_address,
	.ndo_get_stats64	= nicvf_get_stats64,
	.ndo_tx_timeout		= nicvf_tx_timeout,
};
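
/* PCI probe: enable the device, request its regions, set a 48-bit DMA mask,
 * allocate the multi-queue netdev, map BAR0 (the VF configuration
 * registers), allocate Qset resources, register the mailbox interrupt so the
 * PF can be queried for this VF's MAC address, and finally register the net
 * device and its ethtool ops.
 */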
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct nicvf *nic;
	struct queue_set *qs;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
				    MAX_RCV_QUEUES_PER_QS,
				    MAX_SND_QUEUES_PER_QS);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;

	/* MAP VF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = nicvf_set_qset_resources(nic);
	if (err)
		goto err_free_netdev;

	qs = nic->qs;

	err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
	if (err)
		goto err_free_netdev;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_register_misc_interrupt(nic);
	if (err)
		goto err_free_netdev;

	netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
			     NETIF_F_TSO | NETIF_F_GRO);
	netdev->hw_features = netdev->features;

	netdev->netdev_ops = &nicvf_netdev_ops;
	netdev->watchdog_timeo = NICVF_TX_TIMEOUT;

	INIT_WORK(&nic->reset_task, nicvf_reset_task);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_unregister_interrupts;
	}

	nic->msg_enable = debug;

	nicvf_set_ethtool_ops(netdev);

	return 0;

err_unregister_interrupts:
	nicvf_unregister_interrupts(nic);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}

static void nicvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nicvf *nic = netdev_priv(netdev);

	unregister_netdev(netdev);
	nicvf_unregister_interrupts(nic);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void nicvf_shutdown(struct pci_dev *pdev)
{
	nicvf_remove(pdev);
}

static struct pci_driver nicvf_driver = {
	.name		= DRV_NAME,
	.id_table	= nicvf_id_table,
	.probe		= nicvf_probe,
	.remove		= nicvf_remove,
	.shutdown	= nicvf_shutdown,
};

static int __init nicvf_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nicvf_driver);
}

static void __exit nicvf_cleanup_module(void)
{
	pci_unregister_driver(&nicvf_driver);
}

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);