/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>
#include <linux/iommu.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"

/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_81XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_83XX_NIC_VF) },
	{ 0, }  /* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);

static int debug = 0x00;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
		 "CPI algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");

static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
	if (nic->sqs_mode)
		return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
	else
		return qidx;
}

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * explicit ordering operations which in this case are redundant, and only
 * add overhead.
 */

/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
{
	void __iomem *addr = nic->reg_base + offset;

	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
	void __iomem *addr = nic->reg_base + offset;

	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
}

/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT;
	int sleep = 10;

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for previous message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked) {
			netdev_err(nic->netdev,
				   "PF NACK to mbox msg 0x%02x from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EINVAL;
		}
		msleep(sleep);
		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EBUSY;
		}
	}
	return 0;
}

/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated with.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		netdev_err(nic->netdev,
			   "PF didn't respond to READY msg\n");
		return 0;
	}

	return 1;
}

static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}

static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			ether_addr_copy(nic->netdev->dev_addr,
					mbx.nic_cfg.mac_addr);
		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		nic->mac_type = mbx.link_status.mac_type;
		if (nic->link_up) {
			netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n",
				    nic->speed,
				    nic->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
			netif_carrier_on(nic->netdev);
			netif_tx_start_all_queues(nic->netdev);
		} else {
			netdev_info(nic->netdev, "Link is Down\n");
			netif_carrier_off(nic->netdev);
			netif_tx_stop_all_queues(nic->netdev);
		}
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic->sqs_count = mbx.sqs_alloc.qs_count;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_SNICVF_PTR:
		/* Primary VF: make note of secondary VF's pointer
		 * to be used while transmitting packets.
		 */
		nic->snicvf[mbx.nicvf.sqs_id] =
			(struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		/* Secondary VF/Qset: make note of primary VF's pointer
		 * to be used while receiving packets, to hand over the
		 * packet to primary VF's netdev.
		 */
		nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PFC:
		nic->pfc.autoneg = mbx.pfc.autoneg;
		nic->pfc.fc_rx = mbx.pfc.fc_rx;
		nic->pfc.fc_tx = mbx.pfc.fc_tx;
		nic->pf_acked = true;
		break;
	default:
		netdev_err(nic->netdev,
			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}

void nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss = &nic->rss_info;
	int ind_tbl_len = rss->rss_size;
	int i, nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = min(ind_tbl_len,
					  RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
			NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}

void nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
	int idx;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(u64);
	}
}

static int nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	nicvf_get_rss_size(nic);

	if (cpi_alg != CPI_ALG_NONE) {
		rss->enable = false;
		rss->hash_bits = 0;
		return 0;
	}

	rss->enable = true;

	netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

	rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
							       nic->rx_queues);
	nicvf_config_rss(nic);
	return 1;
}

/* Request PF to allocate additional Qsets */
static void nicvf_request_sqs(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	int sqs;
	int sqs_count = nic->sqs_count;
	int rx_queues = 0, tx_queues = 0;

	/* Only primary VF should request */
	if (nic->sqs_mode || !nic->sqs_count)
		return;

	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = nic->vf_id;
	mbx.sqs_alloc.qs_count = nic->sqs_count;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		/* No response from PF */
		nic->sqs_count = 0;
		return;
	}

	/* Return if no Secondary Qsets available */
	if (!nic->sqs_count)
		return;

	if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
		rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;

	tx_queues = nic->tx_queues + nic->xdp_tx_queues;
	if (tx_queues > MAX_SND_QUEUES_PER_QS)
		tx_queues = tx_queues - MAX_SND_QUEUES_PER_QS;

	/* Set number of Rx/Tx queues in each of the SQsets */
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
		mbx.nicvf.vf_id = nic->vf_id;
		mbx.nicvf.sqs_id = sqs;
		nicvf_send_msg_to_pf(nic, &mbx);

		nic->snicvf[sqs]->sqs_id = sqs;
		if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
			rx_queues -= MAX_RCV_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
			rx_queues = 0;
		}

		if (tx_queues > MAX_SND_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
			tx_queues -= MAX_SND_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
			tx_queues = 0;
		}

		nic->snicvf[sqs]->qs->cq_cnt =
			max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);

		/* Initialize secondary Qset's queues and its interrupts */
		nicvf_open(nic->snicvf[sqs]->netdev);
	}

	/* Update stack with actual Rx/Tx queue count allocated */
	if (sqs_count != nic->sqs_count)
		nicvf_set_real_num_queues(nic->netdev,
					  nic->tx_queues, nic->rx_queues);
}

/* Send this Qset's nicvf pointer to PF.
 * PF in turn sends primary VF's nicvf struct to secondary Qsets/VFs
 * so that packets received by these Qsets can use primary VF's netdev
 */
static void nicvf_send_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
	mbx.nicvf.sqs_mode = nic->sqs_mode;
	mbx.nicvf.nicvf = (u64)nic;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_primary_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	nicvf_send_msg_to_pf(nic, &mbx);
}

int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues)
{
	int err = 0;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}

static int nicvf_init_resources(struct nicvf *nic)
{
	int err;

	/* Enable Qset */
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);
	if (err) {
		netdev_err(nic->netdev,
			   "Failed to alloc/config VF's QSet resources\n");
		return err;
	}

	return 0;
}

static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
				struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
				struct sk_buff **skb)
{
	struct xdp_buff xdp;
	struct page *page;
	u32 action;
	u16 len, offset = 0;
	u64 dma_addr, cpu_addr;
	void *orig_data;

	/* Retrieve packet buffer's DMA address and length */
	len = *((u16 *)((void *)cqe_rx + (3 * sizeof(u64))));
	dma_addr = *((u64 *)((void *)cqe_rx + (7 * sizeof(u64))));

	cpu_addr = nicvf_iova_to_phys(nic, dma_addr);
	if (!cpu_addr)
		return false;
	cpu_addr = (u64)phys_to_virt(cpu_addr);
	page = virt_to_page((void *)cpu_addr);

	xdp.data_hard_start = page_address(page);
	xdp.data = (void *)cpu_addr;
	xdp.data_end = xdp.data + len;
	orig_data = xdp.data;

	rcu_read_lock();
	action = bpf_prog_run_xdp(prog, &xdp);
	rcu_read_unlock();

	/* Check if XDP program has changed headers */
	if (orig_data != xdp.data) {
		len = xdp.data_end - xdp.data;
		offset = orig_data - xdp.data;
		dma_addr -= offset;
	}

	switch (action) {
	case XDP_PASS:
		/* Check if it's a recycled page, if not
		 * unmap the DMA mapping.
		 *
		 * Recycled page holds an extra reference.
		 */
		if (page_ref_count(page) == 1) {
			dma_addr &= PAGE_MASK;
			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
		}

		/* Build SKB and pass on packet to network stack */
		*skb = build_skb(xdp.data,
				 RCV_FRAG_LEN - cqe_rx->align_pad + offset);
		if (!*skb)
			put_page(page);
		else
			skb_put(*skb, len);
		return false;
	case XDP_TX:
		nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
		return true;
	default:
		bpf_warn_invalid_xdp_action(action);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(nic->netdev, prog, action);
		/* fall through */
	case XDP_DROP:
		/* Check if it's a recycled page, if not
		 * unmap the DMA mapping.
		 *
		 * Recycled page holds an extra reference.
		 */
		if (page_ref_count(page) == 1) {
			dma_addr &= PAGE_MASK;
			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
		}
		put_page(page);
		return true;
	}
	return false;
}

static void nicvf_snd_pkt_handler(struct net_device *netdev,
				  struct cqe_send_t *cqe_tx,
				  int budget, int *subdesc_cnt,
				  unsigned int *tx_pkts, unsigned int *tx_bytes)
{
	struct sk_buff *skb = NULL;
	struct page *page;
	struct nicvf *nic = netdev_priv(netdev);
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;
	struct sq_hdr_subdesc *tso_sqe;

	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	/* Check for errors */
	if (cqe_tx->send_status)
		nicvf_check_cqe_tx_errs(nic->pnicvf, cqe_tx);

	/* Is this an XDP designated Tx queue? */
	if (sq->is_xdp) {
		page = (struct page *)sq->xdp_page[cqe_tx->sqe_ptr];
		/* Check if it's a recycled page, else unmap the DMA mapping */
		if (page && (page_ref_count(page) == 1))
			nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
						 hdr->subdesc_cnt);

		/* Release page reference for recycling */
		if (page)
			put_page(page);
		sq->xdp_page[cqe_tx->sqe_ptr] = (u64)NULL;
		*subdesc_cnt += hdr->subdesc_cnt + 1;
		return;
	}

	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
	if (skb) {
		/* Check for dummy descriptor used for HW TSO offload on 88xx */
		if (hdr->dont_send) {
			/* Get actual TSO descriptors and free them */
			tso_sqe =
			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
			nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
						 tso_sqe->subdesc_cnt);
			*subdesc_cnt += tso_sqe->subdesc_cnt + 1;
		} else {
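			/* Regular (non TSO-dummy) SQE: unmap the DMA buffers
			 * referenced by this SQE's subdescriptors.
			 */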
			nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
						 hdr->subdesc_cnt);
		}
		*subdesc_cnt += hdr->subdesc_cnt + 1;
		prefetch(skb);
		(*tx_pkts)++;
		*tx_bytes += skb->len;
		napi_consume_skb(skb, budget);
		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
	} else {
		/* In case of SW TSO on 88xx, only last segment will have
		 * a SKB attached, so just free SQEs here.
		 */
		if (!nic->hw_tso)
			*subdesc_cnt += hdr->subdesc_cnt + 1;
	}
}

static inline void nicvf_set_rxhash(struct net_device *netdev,
				    struct cqe_rx_t *cqe_rx,
				    struct sk_buff *skb)
{
	u8 hash_type;
	u32 hash;

	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (cqe_rx->rss_alg) {
	case RSS_ALG_TCP_IP:
	case RSS_ALG_UDP_IP:
		hash_type = PKT_HASH_TYPE_L4;
		hash = cqe_rx->rss_tag;
		break;
	case RSS_ALG_IP:
		hash_type = PKT_HASH_TYPE_L3;
		hash = cqe_rx->rss_tag;
		break;
	default:
		hash_type = PKT_HASH_TYPE_NONE;
		hash = 0;
	}

	skb_set_hash(skb, hash, hash_type);
}

static void nicvf_rcv_pkt_handler(struct net_device *netdev,
				  struct napi_struct *napi,
				  struct cqe_rx_t *cqe_rx, struct snd_queue *sq)
{
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf *snic = nic;
	int err = 0;
	int rq_idx;

	rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);

	if (nic->sqs_mode) {
		/* Use primary VF's 'nicvf' struct */
		nic = nic->pnicvf;
		netdev = nic->netdev;
	}

	/* Check for errors */
	if (cqe_rx->err_level || cqe_rx->err_opcode) {
		err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
		if (err && !cqe_rx->rb_cnt)
			return;
	}

	/* For XDP, ignore pkts spanning multiple pages */
	if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
		/* Packet consumed by XDP */
		if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb))
			return;
	} else {
		skb = nicvf_get_rcv_skb(snic, cqe_rx,
					nic->xdp_prog ? true : false);
	}

	if (!skb)
		return;

	if (netif_msg_pktdata(nic)) {
		netdev_info(nic->netdev, "skb 0x%p, len=%d\n", skb, skb->len);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);
	}

	/* If error packet, drop it here */
	if (err) {
		dev_kfree_skb_any(skb);
		return;
	}

	nicvf_set_rxhash(netdev, cqe_rx, skb);

	skb_record_rx_queue(skb, rq_idx);
	if (netdev->hw_features & NETIF_F_RXCSUM) {
		/* HW by default verifies TCP/UDP/SCTP checksums */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	/* Check for stripped VLAN */
	if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       ntohs((__force __be16)cqe_rx->vlan_tci));

	if (napi && (netdev->features & NETIF_F_GRO))
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
}

static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
				 struct napi_struct *napi, int budget)
{
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	int subdesc_cnt = 0;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;
	struct netdev_queue *txq;
	struct snd_queue *sq = &qs->sq[cq_idx];
	unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;

	spin_lock_bh(&cq->lock);
loop:
	processed_cqe = 0;
	/* Get number of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (!cqe_count)
		goto done;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		if ((work_done >= budget) && napi &&
		    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
			break;
		}

		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq);
			work_done++;
			break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(netdev, (void *)cq_desc,
					      budget, &subdesc_cnt,
					      &tx_pkts, &tx_bytes);
			tx_done++;
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_idx, processed_cqe);

	if ((work_done < budget) && napi)
		goto loop;

done:
	/* Update SQ's descriptor free count */
	if (subdesc_cnt)
		nicvf_put_sq_desc(sq, subdesc_cnt);

	txq_idx = nicvf_netdev_qidx(nic, cq_idx);
	/* Handle XDP TX queues */
	if (nic->pnicvf->xdp_prog) {
		if (txq_idx < nic->pnicvf->xdp_tx_queues) {
			nicvf_xdp_sq_doorbell(nic, sq, cq_idx);
			goto out;
		}
		nic = nic->pnicvf;
		txq_idx -= nic->pnicvf->xdp_tx_queues;
	}

	/* Wake up TXQ if it was stopped earlier due to SQ being full */
	if (tx_done ||
	    (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) {
		netdev = nic->pnicvf->netdev;
		txq = netdev_get_tx_queue(netdev, txq_idx);
		if (tx_pkts)
			netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

		/* To read updated queue and carrier status */
		smp_mb();
		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
			netif_tx_wake_queue(txq);
			nic = nic->pnicvf;
			this_cpu_inc(nic->drv_stats->txq_wake);
			netif_warn(nic, tx_err, netdev,
				   "Transmit queue wakeup SQ%d\n", txq_idx);
		}
	}

out:
	spin_unlock_bh(&cq->lock);
	return work_done;
}

static int nicvf_poll(struct napi_struct *napi, int budget)
{
	u64 cq_head;
	int work_done = 0;
	struct net_device *netdev = napi->dev;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_cq_poll *cq;

	cq = container_of(napi, struct nicvf_cq_poll, napi);
	work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

	if (work_done < budget) {
		/* Slow packet rate, exit polling */
		napi_complete_done(napi, work_done);
		/* Re-enable interrupts */
		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
					       cq->cq_idx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
				      cq->cq_idx, cq_head);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
	}
	return work_done;
}

/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void nicvf_handle_qs_err(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	netif_tx_disable(nic->netdev);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	netif_tx_start_all_queues(nic->netdev);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static void nicvf_dump_intr_status(struct nicvf *nic)
{
	netif_info(nic, intr, nic->netdev, "interrupt status 0x%llx\n",
		   nicvf_reg_read(nic, NIC_VF_INT));
}

static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u64 intr;

	nicvf_dump_intr_status(nic);

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return IRQ_HANDLED;

	nicvf_handle_mbx_intr(nic);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
{
	struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
	struct nicvf *nic = cq_poll->nicvf;
	int qidx = cq_poll->cq_idx;

	nicvf_dump_intr_status(nic);

	/* Disable interrupts */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Schedule NAPI */
	napi_schedule_irqoff(&cq_poll->napi);

	/* Clear interrupt */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u8 qidx;

	nicvf_dump_intr_status(nic);

	/* Disable RBDR interrupt and schedule softirq */
	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
			continue;
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		tasklet_hi_schedule(&nic->rbdr_task);
		/* Clear interrupt */
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;

	nicvf_dump_intr_status(nic);

	/* Disable Qset err interrupt and schedule softirq */
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	tasklet_hi_schedule(&nic->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	return IRQ_HANDLED;
}

static void nicvf_set_irq_affinity(struct nicvf *nic)
{
	int vec, cpu;

	for (vec = 0; vec < nic->num_vec; vec++) {
		if (!nic->irq_allocated[vec])
			continue;

		if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL))
			return;
		/* CQ interrupts */
		if (vec < NICVF_INTR_ID_SQ)
			/* Leave CPU0 for RBDR and other interrupts */
			cpu = nicvf_netdev_qidx(nic, vec) + 1;
		else
			cpu = 0;

		cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
				nic->affinity_mask[vec]);
		irq_set_affinity_hint(pci_irq_vector(nic->pdev, vec),
				      nic->affinity_mask[vec]);
	}
}

static int nicvf_register_interrupts(struct nicvf *nic)
{
	int irq, ret = 0;

	for_each_cq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rxtx-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq));

	for_each_sq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-sq-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));

	for_each_rbdr_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rbdr-%d",
			nic->pnicvf->netdev->name,
			nic->sqs_mode ? (nic->sqs_id + 1) : 0);

	/* Register CQ interrupts */
	for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
		ret = request_irq(pci_irq_vector(nic->pdev, irq),
				  nicvf_intr_handler,
				  0, nic->irq_name[irq], nic->napi[irq]);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register RBDR interrupt */
	for (irq = NICVF_INTR_ID_RBDR;
	     irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
		ret = request_irq(pci_irq_vector(nic->pdev, irq),
				  nicvf_rbdr_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register QS error interrupt */
	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
		nic->pnicvf->netdev->name,
		nic->sqs_mode ? (nic->sqs_id + 1) : 0);
	irq = NICVF_INTR_ID_QS_ERR;
	ret = request_irq(pci_irq_vector(nic->pdev, irq),
			  nicvf_qs_err_intr_handler,
			  0, nic->irq_name[irq], nic);
	if (ret)
		goto err;

	nic->irq_allocated[irq] = true;

	/* Set IRQ affinities */
	nicvf_set_irq_affinity(nic);

err:
	if (ret)
		netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);

	return ret;
}

static void nicvf_unregister_interrupts(struct nicvf *nic)
{
	struct pci_dev *pdev = nic->pdev;
	int irq;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		if (!nic->irq_allocated[irq])
			continue;

		irq_set_affinity_hint(pci_irq_vector(pdev, irq), NULL);
		free_cpumask_var(nic->affinity_mask[irq]);

		if (irq < NICVF_INTR_ID_SQ)
			free_irq(pci_irq_vector(pdev, irq), nic->napi[irq]);
		else
			free_irq(pci_irq_vector(pdev, irq), nic);

		nic->irq_allocated[irq] = false;
	}

	/* Disable MSI-X */
	pci_free_irq_vectors(pdev);
	nic->num_vec = 0;
}

/* Initialize MSIX vectors and register MISC interrupt.
 * Send READY message to PF to check if it's alive.
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
{
	int ret = 0;
	int irq = NICVF_INTR_ID_MISC;

	/* Return if mailbox interrupt is already registered */
	if (nic->pdev->msix_enabled)
		return 0;

	/* Enable MSI-X */
	nic->num_vec = pci_msix_vec_count(nic->pdev);
	ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		netdev_err(nic->netdev,
			   "Req for #%d msix vectors failed\n", nic->num_vec);
		return 1;
	}

	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
	/* Register Misc interrupt */
	ret = request_irq(pci_irq_vector(nic->pdev, irq),
			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);

	if (ret)
		return ret;
	nic->irq_allocated[irq] = true;

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		nicvf_unregister_interrupts(nic);
		return 1;
	}

	return 0;
}

static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qid = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
	struct nicvf *snic;
	struct snd_queue *sq;
	int tmp;

	/* Check for minimum packet length */
	if (skb->len <= ETH_HLEN) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* In XDP case, initial HW tx queues are used for XDP,
	 * but stack's queue mapping starts at '0', so skip the
	 * Tx queues attached to Rx queues for XDP.
	 */
	if (nic->xdp_prog)
		qid += nic->xdp_tx_queues;

	snic = nic;
	/* Get secondary Qset's SQ structure */
	if (qid >= MAX_SND_QUEUES_PER_QS) {
		tmp = qid / MAX_SND_QUEUES_PER_QS;
		snic = (struct nicvf *)nic->snicvf[tmp - 1];
		if (!snic) {
			netdev_warn(nic->netdev,
				    "Secondary Qset#%d's ptr not initialized\n",
				    tmp - 1);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		qid = qid % MAX_SND_QUEUES_PER_QS;
	}

	sq = &snic->qs->sq[qid];
	if (!netif_tx_queue_stopped(txq) &&
	    !nicvf_sq_append_skb(snic, sq, skb, qid)) {
		netif_tx_stop_queue(txq);

		/* Barrier, so that stop_queue is visible to other CPUs */
		smp_mb();

		/* Check again, in case another CPU freed descriptors */
		if (atomic_read(&sq->free_cnt) > MIN_SQ_DESC_PER_PKT_XMIT) {
			netif_tx_wake_queue(txq);
		} else {
			this_cpu_inc(nic->drv_stats->txq_stop);
			netif_warn(nic, tx_err, netdev,
				   "Transmit ring full, stopping SQ%d\n", qid);
		}
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

static inline void nicvf_free_cq_poll(struct nicvf *nic)
{
	struct nicvf_cq_poll *cq_poll;
	int qidx;

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		nic->napi[qidx] = NULL;
		kfree(cq_poll);
	}
}

int nicvf_stop(struct net_device *netdev)
{
	int irq, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(nic->netdev);
	nic->link_up = false;

	/* Teardown secondary qsets first */
	if (!nic->sqs_mode) {
		for (qidx = 0; qidx < nic->sqs_count; qidx++) {
			if (!nic->snicvf[qidx])
				continue;
			nicvf_stop(nic->snicvf[qidx]->netdev);
			nic->snicvf[qidx] = NULL;
		}
	}

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Wait for pending IRQ handlers to finish */
	for (irq = 0; irq < nic->num_vec; irq++)
		synchronize_irq(pci_irq_vector(nic->pdev, irq));

	tasklet_kill(&nic->rbdr_task);
	tasklet_kill(&nic->qs_err_task);
	if (nic->rb_work_scheduled)
		cancel_delayed_work_sync(&nic->rbdr_work);

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_synchronize(&cq_poll->napi);
		/* CQ intr is re-enabled on napi_complete,
		 * so disable it now
		 */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}

	netif_tx_disable(netdev);

	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	/* Disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	nicvf_unregister_interrupts(nic);

	nicvf_free_cq_poll(nic);

	/* Clear multiqset info */
	nic->pnicvf = nic;

	return 0;
}

static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

int nicvf_open(struct net_device *netdev)
{
	int cpu, err, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	netif_carrier_off(netdev);

	err = nicvf_register_misc_interrupt(nic);
	if (err)
		return err;

	/* Register NAPI handler for processing CQEs */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
		if (!cq_poll) {
			err = -ENOMEM;
			goto napi_del;
		}
		cq_poll->cq_idx = qidx;
		cq_poll->nicvf = nic;
		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
		nic->napi[qidx] = cq_poll;
	}

	/* Check if we got MAC address from PF, else generate a random MAC */
	if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	if (nic->set_mac_pending) {
		nic->set_mac_pending = false;
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	/* Init tasklet for handling Qset err interrupt */
	tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
		     (unsigned long)nic);

	/* Init RBDR tasklet which will refill RBDR */
	tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
		     (unsigned long)nic);
	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

	/* Configure CPI algorithm */
	nic->cpi_alg = cpi_alg;
	if (!nic->sqs_mode)
		nicvf_config_cpi(nic);

	nicvf_request_sqs(nic);
	if (nic->sqs_mode)
		nicvf_get_primary_vf_struct(nic);

	/* Configure receive side scaling and MTU */
	if (!nic->sqs_mode) {
		nicvf_rss_init(nic);
		err = nicvf_update_hw_max_frs(nic, netdev->mtu);
		if (err)
			goto cleanup;

		/* Clear percpu stats */
		for_each_possible_cpu(cpu)
			memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
			       sizeof(struct nicvf_drv_stats));
	}

	err = nicvf_register_interrupts(nic);
	if (err)
		goto cleanup;

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err)
		goto cleanup;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, -1);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	/* Send VF config done msg to PF */
	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
	nicvf_write_to_mbx(nic, &mbx);

	return 0;
cleanup:
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
	nicvf_unregister_interrupts(nic);
	tasklet_kill(&nic->qs_err_task);
	tasklet_kill(&nic->rbdr_task);
napi_del:
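	/* Roll back the NAPI contexts registered above and free them */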
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}
	nicvf_free_cq_poll(nic);
	return err;
}

static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nicvf *nic = netdev_priv(netdev);
	int orig_mtu = netdev->mtu;

	netdev->mtu = new_mtu;

	if (!netif_running(netdev))
		return 0;

	if (nicvf_update_hw_max_frs(nic, new_mtu)) {
		netdev->mtu = orig_mtu;
		return -EINVAL;
	}

	return 0;
}

static int nicvf_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct nicvf *nic = netdev_priv(netdev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	if (nic->pdev->msix_enabled) {
		if (nicvf_hw_set_mac_addr(nic, netdev))
			return -EBUSY;
	} else {
		nic->set_mac_pending = true;
	}

	return 0;
}

void nicvf_update_lmac_stats(struct nicvf *nic)
{
	int stat = 0;
	union nic_mbx mbx = {};

	if (!netif_running(nic->netdev))
		return;

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = nic->vf_id;
	/* Rx stats */
	mbx.bgx_stats.rx = 1;
	while (stat < BGX_RX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}

	stat = 0;

	/* Tx stats */
	mbx.bgx_stats.rx = 0;
	while (stat < BGX_TX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}
}

void nicvf_update_stats(struct nicvf *nic)
{
	int qidx, cpu;
	u64 tmp_stats = 0;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats;
	struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	/* On T88 pass 2.0, the dummy SQE added for TSO notification
	 * via CQE has 'dont_send' set. Hence HW drops the pkt pointed
	 * to by the dummy SQE and results in the tx_drops counter being
	 * incremented.
	 * Subtracting it from the tx_tso counter will give the exact
	 * tx_drops count.
	 */
	if (nic->t88 && nic->hw_tso) {
		for_each_possible_cpu(cpu) {
			drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
			tmp_stats += drv_stats->tx_tso;
		}
		stats->tx_drops = tmp_stats - stats->tx_drops;
	}
	stats->tx_frames = stats->tx_ucast_frames +
			   stats->tx_bcast_frames +
			   stats->tx_mcast_frames;
	stats->rx_frames = stats->rx_ucast_frames +
			   stats->rx_bcast_frames +
			   stats->rx_mcast_frames;
	stats->rx_drops = stats->rx_drop_red +
			  stats->rx_drop_overrun;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}

static void nicvf_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;

	nicvf_update_stats(nic);

	stats->rx_bytes = hw_stats->rx_bytes;
	stats->rx_packets = hw_stats->rx_frames;
	stats->rx_dropped = hw_stats->rx_drops;
	stats->multicast = hw_stats->rx_mcast_frames;

	stats->tx_bytes = hw_stats->tx_bytes;
	stats->tx_packets = hw_stats->tx_frames;
	stats->tx_dropped = hw_stats->tx_drops;
}

static void nicvf_tx_timeout(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	netif_warn(nic, tx_err, dev, "Transmit timed out, resetting\n");

	this_cpu_inc(nic->drv_stats->tx_timeout);
	schedule_work(&nic->reset_task);
}

static void nicvf_reset_task(struct work_struct *work)
{
	struct nicvf *nic;

	nic = container_of(work, struct nicvf, reset_task);

	if (!netif_running(nic->netdev))
		return;

	nicvf_stop(nic->netdev);
	nicvf_open(nic->netdev);
	netif_trans_update(nic->netdev);
}

static int nicvf_config_loopback(struct nicvf *nic,
				 netdev_features_t features)
{
	union nic_mbx mbx = {};

	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
	mbx.lbk.vf_id = nic->vf_id;
	mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static netdev_features_t nicvf_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);

	if ((features & NETIF_F_LOOPBACK) &&
	    netif_running(netdev) && !nic->loopback_supported)
		features &= ~NETIF_F_LOOPBACK;

	return features;
}

static int nicvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		nicvf_config_vlan_stripping(nic, features);

	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
		return nicvf_config_loopback(nic, features);

	return 0;
}

static void nicvf_set_xdp_queues(struct nicvf *nic, bool bpf_attached)
{
	u8 cq_count, txq_count;

	/* Set XDP Tx queue count same as Rx queue count */
	if (!bpf_attached)
		nic->xdp_tx_queues = 0;
	else
		nic->xdp_tx_queues = nic->rx_queues;

	/* If queue count > MAX_CMP_QUEUES_PER_QS, then additional qsets
	 * need to be allocated, check how many.
	 */
	txq_count = nic->xdp_tx_queues + nic->tx_queues;
	cq_count = max(nic->rx_queues, txq_count);
	if (cq_count > MAX_CMP_QUEUES_PER_QS) {
		nic->sqs_count = roundup(cq_count, MAX_CMP_QUEUES_PER_QS);
		nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
	} else {
		nic->sqs_count = 0;
	}

	/* Set primary Qset's resources */
	nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
	nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
	nic->qs->cq_cnt = max_t(u8, nic->qs->rq_cnt, nic->qs->sq_cnt);

	/* Update stack */
	nicvf_set_real_num_queues(nic->netdev, nic->tx_queues, nic->rx_queues);
}

static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
{
	struct net_device *dev = nic->netdev;
	bool if_up = netif_running(nic->netdev);
	struct bpf_prog *old_prog;
	bool bpf_attached = false;

	/* For now just support only the usual MTU sized frames */
	if (prog && (dev->mtu > 1500)) {
		netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
			    dev->mtu);
		return -EOPNOTSUPP;
	}

	/* All SQs attached to CQs (i.e. as many as there are RQs) are
	 * treated as XDP Tx queues, and additional Tx queues are
	 * allocated for the network stack to send packets out.
	 *
	 * The number of stack Tx queues is either the same as the Rx
	 * queue count or whatever is left within the maximum number of
	 * queues possible.
	 */
	if ((nic->rx_queues + nic->tx_queues) > nic->max_queues) {
		netdev_warn(dev,
			    "Failed to attach BPF prog, RXQs + TXQs > Max %d\n",
			    nic->max_queues);
		return -ENOMEM;
	}

	if (if_up)
		nicvf_stop(nic->netdev);

	old_prog = xchg(&nic->xdp_prog, prog);
	/* Detach old prog, if any */
	if (old_prog)
		bpf_prog_put(old_prog);

	if (nic->xdp_prog) {
		/* Attach BPF program */
		nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
		if (!IS_ERR(nic->xdp_prog))
			bpf_attached = true;
	}

	/* Calculate Tx queues needed for XDP and network stack */
	nicvf_set_xdp_queues(nic, bpf_attached);

	if (if_up) {
		/* Reinitialize interface, clean slate */
		nicvf_open(nic->netdev);
		netif_trans_update(nic->netdev);
	}

	return 0;
}

static int nicvf_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
{
	struct nicvf *nic = netdev_priv(netdev);

	/* To avoid checks while retrieving buffer address from CQE_RX,
	 * do not support XDP for T88 pass1.x silicons which are anyway
	 * not widely in use.
	 */
	if (pass1_silicon(nic->pdev))
		return -EOPNOTSUPP;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return nicvf_xdp_setup(nic, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!nic->xdp_prog;
		xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_open		= nicvf_open,
	.ndo_stop		= nicvf_stop,
	.ndo_start_xmit		= nicvf_xmit,
	.ndo_change_mtu		= nicvf_change_mtu,
	.ndo_set_mac_address	= nicvf_set_mac_address,
	.ndo_get_stats64	= nicvf_get_stats64,
	.ndo_tx_timeout		= nicvf_tx_timeout,
	.ndo_fix_features	= nicvf_fix_features,
	.ndo_set_features	= nicvf_set_features,
	.ndo_xdp		= nicvf_xdp,
};

static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct nicvf *nic;
	int err, qcount;
	u16 sdevid;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	qcount = netif_get_num_default_rss_queues();

	/* Restrict multiqset support only for host bound VFs */
	if (pdev->is_virtfn) {
		/* Set max number of queues per VF */
		qcount = min_t(int, num_online_cpus(),
			       (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
	}

	netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->pnicvf = nic;
	nic->max_queues = qcount;

	/* MAP VF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
	if (!nic->drv_stats) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = nicvf_set_qset_resources(nic);
	if (err)
		goto err_free_netdev;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_register_misc_interrupt(nic);
	if (err)
		goto err_free_netdev;

	nicvf_send_vf_struct(nic);

	if (!pass1_silicon(nic->pdev))
		nic->hw_tso = true;

	/* Get iommu domain for iova to physical addr conversion */
	nic->iommu_domain = iommu_get_domain_for_dev(dev);

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if (sdevid == 0xA134)
		nic->t88 = true;

	/* Check if this VF is in QS only mode */
	if (nic->sqs_mode)
		return 0;

	err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
	if (err)
		goto err_unregister_interrupts;

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_SG |
			       NETIF_F_TSO | NETIF_F_GRO | NETIF_F_TSO6 |
			       NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			       NETIF_F_HW_VLAN_CTAG_RX);
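	/* HW-computed RSS hash (from CQE_RX) is reported to the stack */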
	netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= NETIF_F_LOOPBACK;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM |
				NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;

	netdev->netdev_ops = &nicvf_netdev_ops;
	netdev->watchdog_timeo = NICVF_TX_TIMEOUT;

	/* MTU range: 64 - 9200 */
	netdev->min_mtu = NIC_HW_MIN_FRS;
	netdev->max_mtu = NIC_HW_MAX_FRS;

	INIT_WORK(&nic->reset_task, nicvf_reset_task);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_unregister_interrupts;
	}

	nic->msg_enable = debug;

	nicvf_set_ethtool_ops(netdev);

	return 0;

err_unregister_interrupts:
	nicvf_unregister_interrupts(nic);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	if (nic->drv_stats)
		free_percpu(nic->drv_stats);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}

static void nicvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nicvf *nic;
	struct net_device *pnetdev;

	if (!netdev)
		return;

	nic = netdev_priv(netdev);
	pnetdev = nic->pnicvf->netdev;

	/* Check if this Qset is assigned to a different VF.
	 * If yes, clean primary and all secondary Qsets.
	 */
	if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
		unregister_netdev(pnetdev);
	nicvf_unregister_interrupts(nic);
	pci_set_drvdata(pdev, NULL);
	if (nic->drv_stats)
		free_percpu(nic->drv_stats);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void nicvf_shutdown(struct pci_dev *pdev)
{
	nicvf_remove(pdev);
}

static struct pci_driver nicvf_driver = {
	.name = DRV_NAME,
	.id_table = nicvf_id_table,
	.probe = nicvf_probe,
	.remove = nicvf_remove,
	.shutdown = nicvf_shutdown,
};

static int __init nicvf_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nicvf_driver);
}

static void __exit nicvf_cleanup_module(void)
{
	pci_unregister_driver(&nicvf_driver);
}

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);