1 /* 2 * Copyright (C) 2015-2017 Netronome Systems, Inc. 3 * 4 * This software is dual licensed under the GNU General License Version 2, 5 * June 1991 as shown in the file COPYING in the top-level directory of this 6 * source tree or the BSD 2-Clause License provided below. You have the 7 * option to license this software under the complete terms of either license. 8 * 9 * The BSD 2-Clause License: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * 1. Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * 2. Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 /* 35 * nfp_net_common.c 36 * Netronome network device driver: Common functions between PF and VF 37 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> 38 * Jason McMullan <jason.mcmullan@netronome.com> 39 * Rolf Neugebauer <rolf.neugebauer@netronome.com> 40 * Brad Petrus <brad.petrus@netronome.com> 41 * Chris Telfer <chris.telfer@netronome.com> 42 */ 43 44 #include <linux/bitfield.h> 45 #include <linux/bpf.h> 46 #include <linux/bpf_trace.h> 47 #include <linux/module.h> 48 #include <linux/kernel.h> 49 #include <linux/init.h> 50 #include <linux/fs.h> 51 #include <linux/netdevice.h> 52 #include <linux/etherdevice.h> 53 #include <linux/interrupt.h> 54 #include <linux/ip.h> 55 #include <linux/ipv6.h> 56 #include <linux/page_ref.h> 57 #include <linux/pci.h> 58 #include <linux/pci_regs.h> 59 #include <linux/msi.h> 60 #include <linux/ethtool.h> 61 #include <linux/log2.h> 62 #include <linux/if_vlan.h> 63 #include <linux/random.h> 64 #include <linux/vmalloc.h> 65 #include <linux/ktime.h> 66 67 #include <net/switchdev.h> 68 #include <net/vxlan.h> 69 70 #include "nfpcore/nfp_nsp.h" 71 #include "nfp_app.h" 72 #include "nfp_net_ctrl.h" 73 #include "nfp_net.h" 74 #include "nfp_net_sriov.h" 75 #include "nfp_port.h" 76 77 /** 78 * nfp_net_get_fw_version() - Read and parse the FW version 79 * @fw_ver: Output fw_version structure to read to 80 * @ctrl_bar: Mapped address of the control BAR 81 */ 82 void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, 83 void __iomem *ctrl_bar) 84 { 85 u32 reg; 86 87 reg = readl(ctrl_bar + NFP_NET_CFG_VERSION); 88 put_unaligned_le32(reg, fw_ver); 89 } 90 91 static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag) 92 { 93 return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM, 94 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, 95 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC); 96 } 97 98 static void 99 nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr) 100 { 101 dma_sync_single_for_device(dp->dev, dma_addr, 102 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, 103 dp->rx_dma_dir); 104 } 105 106 static 
void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr) 107 { 108 dma_unmap_single_attrs(dp->dev, dma_addr, 109 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA, 110 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC); 111 } 112 113 static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr, 114 unsigned int len) 115 { 116 dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM, 117 len, dp->rx_dma_dir); 118 } 119 120 /* Firmware reconfig 121 * 122 * Firmware reconfig may take a while so we have two versions of it - 123 * synchronous and asynchronous (posted). All synchronous callers are holding 124 * RTNL so we don't have to worry about serializing them. 125 */ 126 static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update) 127 { 128 nn_writel(nn, NFP_NET_CFG_UPDATE, update); 129 /* ensure update is written before pinging HW */ 130 nn_pci_flush(nn); 131 nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1); 132 } 133 134 /* Pass 0 as update to run posted reconfigs. */ 135 static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update) 136 { 137 update |= nn->reconfig_posted; 138 nn->reconfig_posted = 0; 139 140 nfp_net_reconfig_start(nn, update); 141 142 nn->reconfig_timer_active = true; 143 mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ); 144 } 145 146 static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check) 147 { 148 u32 reg; 149 150 reg = nn_readl(nn, NFP_NET_CFG_UPDATE); 151 if (reg == 0) 152 return true; 153 if (reg & NFP_NET_CFG_UPDATE_ERR) { 154 nn_err(nn, "Reconfig error: 0x%08x\n", reg); 155 return true; 156 } else if (last_check) { 157 nn_err(nn, "Reconfig timeout: 0x%08x\n", reg); 158 return true; 159 } 160 161 return false; 162 } 163 164 static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline) 165 { 166 bool timed_out = false; 167 168 /* Poll update field, waiting for NFP to ack the config */ 169 while (!nfp_net_reconfig_check_done(nn, timed_out)) { 170 msleep(1); 171 timed_out = time_is_before_eq_jiffies(deadline); 172 } 173 174 if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR) 175 return -EIO; 176 177 return timed_out ? -EIO : 0; 178 } 179 180 static void nfp_net_reconfig_timer(struct timer_list *t) 181 { 182 struct nfp_net *nn = from_timer(nn, t, reconfig_timer); 183 184 spin_lock_bh(&nn->reconfig_lock); 185 186 nn->reconfig_timer_active = false; 187 188 /* If sync caller is present it will take over from us */ 189 if (nn->reconfig_sync_present) 190 goto done; 191 192 /* Read reconfig status and report errors */ 193 nfp_net_reconfig_check_done(nn, true); 194 195 if (nn->reconfig_posted) 196 nfp_net_reconfig_start_async(nn, 0); 197 done: 198 spin_unlock_bh(&nn->reconfig_lock); 199 } 200 201 /** 202 * nfp_net_reconfig_post() - Post async reconfig request 203 * @nn: NFP Net device to reconfigure 204 * @update: The value for the update field in the BAR config 205 * 206 * Record FW reconfiguration request. Reconfiguration will be kicked off 207 * whenever reconfiguration machinery is idle. Multiple requests can be 208 * merged together! 
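 *
 * For example, a caller that cannot sleep (the VXLAN port update path in
 * this driver is one such case) can request the matching update with
 * something along the lines of:
 *
 *	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
 *
 * The update bit is then either kicked off immediately or merged into the
 * next posted reconfiguration.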
 */
static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
{
	spin_lock_bh(&nn->reconfig_lock);

	/* Sync caller will kick off async reconf when it's done, just post */
	if (nn->reconfig_sync_present) {
		nn->reconfig_posted |= update;
		goto done;
	}

	/* Opportunistically check if the previous command is done */
	if (!nn->reconfig_timer_active ||
	    nfp_net_reconfig_check_done(nn, false))
		nfp_net_reconfig_start_async(nn, update);
	else
		nn->reconfig_posted |= update;
done:
	spin_unlock_bh(&nn->reconfig_lock);
}

/**
 * nfp_net_reconfig() - Reconfigure the firmware
 * @nn: NFP Net device to reconfigure
 * @update: The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue. Then
 * poll until the firmware has acknowledged the update by zeroing the
 * update word.
 *
 * Return: Negative errno on error, 0 on success
 */
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
	bool cancelled_timer = false;
	u32 pre_posted_requests;
	int ret;

	spin_lock_bh(&nn->reconfig_lock);

	nn->reconfig_sync_present = true;

	if (nn->reconfig_timer_active) {
		del_timer(&nn->reconfig_timer);
		nn->reconfig_timer_active = false;
		cancelled_timer = true;
	}
	pre_posted_requests = nn->reconfig_posted;
	nn->reconfig_posted = 0;

	spin_unlock_bh(&nn->reconfig_lock);

	if (cancelled_timer)
		nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);

	/* Run the posted reconfigs which were issued before we started */
	if (pre_posted_requests) {
		nfp_net_reconfig_start(nn, pre_posted_requests);
		nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
	}

	nfp_net_reconfig_start(nn, update);
	ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);

	spin_lock_bh(&nn->reconfig_lock);

	if (nn->reconfig_posted)
		nfp_net_reconfig_start_async(nn, 0);

	nn->reconfig_sync_present = false;

	spin_unlock_bh(&nn->reconfig_lock);

	return ret;
}

/**
 * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox
 * @nn: NFP Net device to reconfigure
 * @mbox_cmd: The value for the mailbox command
 *
 * Helper function for mailbox updates
 *
 * Return: Negative errno on error, 0 on success
 */
static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
{
	int ret;

	nn_writeq(nn, NFP_NET_CFG_MBOX_CMD, mbox_cmd);

	ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
	if (ret) {
		nn_err(nn, "Mailbox update error\n");
		return ret;
	}

	return -nn_readl(nn, NFP_NET_CFG_MBOX_RET);
}

/* Interrupt configuration and handling
 */

/**
 * nfp_net_irq_unmask() - Unmask automasked interrupt
 * @nn: NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * Clear the ICR for the IRQ entry.
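 * Interrupts are auto-masked by the device/firmware once they fire, so
 * handlers such as nfp_net_irq_lsc() below call this helper after servicing
 * the event to re-arm their MSI-X entry.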
318 */ 319 static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr) 320 { 321 nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED); 322 nn_pci_flush(nn); 323 } 324 325 /** 326 * nfp_net_irqs_alloc() - allocates MSI-X irqs 327 * @pdev: PCI device structure 328 * @irq_entries: Array to be initialized and used to hold the irq entries 329 * @min_irqs: Minimal acceptable number of interrupts 330 * @wanted_irqs: Target number of interrupts to allocate 331 * 332 * Return: Number of irqs obtained or 0 on error. 333 */ 334 unsigned int 335 nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries, 336 unsigned int min_irqs, unsigned int wanted_irqs) 337 { 338 unsigned int i; 339 int got_irqs; 340 341 for (i = 0; i < wanted_irqs; i++) 342 irq_entries[i].entry = i; 343 344 got_irqs = pci_enable_msix_range(pdev, irq_entries, 345 min_irqs, wanted_irqs); 346 if (got_irqs < 0) { 347 dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n", 348 min_irqs, wanted_irqs, got_irqs); 349 return 0; 350 } 351 352 if (got_irqs < wanted_irqs) 353 dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n", 354 wanted_irqs, got_irqs); 355 356 return got_irqs; 357 } 358 359 /** 360 * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev 361 * @nn: NFP Network structure 362 * @irq_entries: Table of allocated interrupts 363 * @n: Size of @irq_entries (number of entries to grab) 364 * 365 * After interrupts are allocated with nfp_net_irqs_alloc() this function 366 * should be called to assign them to a specific netdev (port). 367 */ 368 void 369 nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries, 370 unsigned int n) 371 { 372 struct nfp_net_dp *dp = &nn->dp; 373 374 nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS; 375 dp->num_r_vecs = nn->max_r_vecs; 376 377 memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n); 378 379 if (dp->num_rx_rings > dp->num_r_vecs || 380 dp->num_tx_rings > dp->num_r_vecs) 381 dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n", 382 dp->num_rx_rings, dp->num_tx_rings, 383 dp->num_r_vecs); 384 385 dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings); 386 dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings); 387 dp->num_stack_tx_rings = dp->num_tx_rings; 388 } 389 390 /** 391 * nfp_net_irqs_disable() - Disable interrupts 392 * @pdev: PCI device structure 393 * 394 * Undoes what @nfp_net_irqs_alloc() does. 395 */ 396 void nfp_net_irqs_disable(struct pci_dev *pdev) 397 { 398 pci_disable_msix(pdev); 399 } 400 401 /** 402 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings. 403 * @irq: Interrupt 404 * @data: Opaque data structure 405 * 406 * Return: Indicate if the interrupt has been handled. 407 */ 408 static irqreturn_t nfp_net_irq_rxtx(int irq, void *data) 409 { 410 struct nfp_net_r_vector *r_vec = data; 411 412 napi_schedule_irqoff(&r_vec->napi); 413 414 /* The FW auto-masks any interrupt, either via the MASK bit in 415 * the MSI-X table or via the per entry ICR field. So there 416 * is no need to disable interrupts here. 
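	 * The vector is re-armed later from nfp_net_poll(), which calls
	 * nfp_net_irq_unmask() once NAPI processing completes.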
417 */ 418 return IRQ_HANDLED; 419 } 420 421 static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data) 422 { 423 struct nfp_net_r_vector *r_vec = data; 424 425 tasklet_schedule(&r_vec->tasklet); 426 427 return IRQ_HANDLED; 428 } 429 430 /** 431 * nfp_net_read_link_status() - Reread link status from control BAR 432 * @nn: NFP Network structure 433 */ 434 static void nfp_net_read_link_status(struct nfp_net *nn) 435 { 436 unsigned long flags; 437 bool link_up; 438 u32 sts; 439 440 spin_lock_irqsave(&nn->link_status_lock, flags); 441 442 sts = nn_readl(nn, NFP_NET_CFG_STS); 443 link_up = !!(sts & NFP_NET_CFG_STS_LINK); 444 445 if (nn->link_up == link_up) 446 goto out; 447 448 nn->link_up = link_up; 449 if (nn->port) 450 set_bit(NFP_PORT_CHANGED, &nn->port->flags); 451 452 if (nn->link_up) { 453 netif_carrier_on(nn->dp.netdev); 454 netdev_info(nn->dp.netdev, "NIC Link is Up\n"); 455 } else { 456 netif_carrier_off(nn->dp.netdev); 457 netdev_info(nn->dp.netdev, "NIC Link is Down\n"); 458 } 459 out: 460 spin_unlock_irqrestore(&nn->link_status_lock, flags); 461 } 462 463 /** 464 * nfp_net_irq_lsc() - Interrupt service routine for link state changes 465 * @irq: Interrupt 466 * @data: Opaque data structure 467 * 468 * Return: Indicate if the interrupt has been handled. 469 */ 470 static irqreturn_t nfp_net_irq_lsc(int irq, void *data) 471 { 472 struct nfp_net *nn = data; 473 struct msix_entry *entry; 474 475 entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX]; 476 477 nfp_net_read_link_status(nn); 478 479 nfp_net_irq_unmask(nn, entry->entry); 480 481 return IRQ_HANDLED; 482 } 483 484 /** 485 * nfp_net_irq_exn() - Interrupt service routine for exceptions 486 * @irq: Interrupt 487 * @data: Opaque data structure 488 * 489 * Return: Indicate if the interrupt has been handled. 490 */ 491 static irqreturn_t nfp_net_irq_exn(int irq, void *data) 492 { 493 struct nfp_net *nn = data; 494 495 nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__); 496 /* XXX TO BE IMPLEMENTED */ 497 return IRQ_HANDLED; 498 } 499 500 /** 501 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring 502 * @tx_ring: TX ring structure 503 * @r_vec: IRQ vector servicing this ring 504 * @idx: Ring index 505 * @is_xdp: Is this an XDP TX ring? 
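 *
 * The ring's queue controller index is derived from @idx (idx * stride_tx),
 * so TX ring numbering maps directly onto QCP queues.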
506 */ 507 static void 508 nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, 509 struct nfp_net_r_vector *r_vec, unsigned int idx, 510 bool is_xdp) 511 { 512 struct nfp_net *nn = r_vec->nfp_net; 513 514 tx_ring->idx = idx; 515 tx_ring->r_vec = r_vec; 516 tx_ring->is_xdp = is_xdp; 517 u64_stats_init(&tx_ring->r_vec->tx_sync); 518 519 tx_ring->qcidx = tx_ring->idx * nn->stride_tx; 520 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); 521 } 522 523 /** 524 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring 525 * @rx_ring: RX ring structure 526 * @r_vec: IRQ vector servicing this ring 527 * @idx: Ring index 528 */ 529 static void 530 nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring, 531 struct nfp_net_r_vector *r_vec, unsigned int idx) 532 { 533 struct nfp_net *nn = r_vec->nfp_net; 534 535 rx_ring->idx = idx; 536 rx_ring->r_vec = r_vec; 537 u64_stats_init(&rx_ring->r_vec->rx_sync); 538 539 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; 540 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); 541 } 542 543 /** 544 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN) 545 * @nn: NFP Network structure 546 * @ctrl_offset: Control BAR offset where IRQ configuration should be written 547 * @format: printf-style format to construct the interrupt name 548 * @name: Pointer to allocated space for interrupt name 549 * @name_sz: Size of space for interrupt name 550 * @vector_idx: Index of MSI-X vector used for this interrupt 551 * @handler: IRQ handler to register for this interrupt 552 */ 553 static int 554 nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset, 555 const char *format, char *name, size_t name_sz, 556 unsigned int vector_idx, irq_handler_t handler) 557 { 558 struct msix_entry *entry; 559 int err; 560 561 entry = &nn->irq_entries[vector_idx]; 562 563 snprintf(name, name_sz, format, nfp_net_name(nn)); 564 err = request_irq(entry->vector, handler, 0, name, nn); 565 if (err) { 566 nn_err(nn, "Failed to request IRQ %d (err=%d).\n", 567 entry->vector, err); 568 return err; 569 } 570 nn_writeb(nn, ctrl_offset, entry->entry); 571 572 return 0; 573 } 574 575 /** 576 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN) 577 * @nn: NFP Network structure 578 * @ctrl_offset: Control BAR offset where IRQ configuration should be written 579 * @vector_idx: Index of MSI-X vector used for this interrupt 580 */ 581 static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset, 582 unsigned int vector_idx) 583 { 584 nn_writeb(nn, ctrl_offset, 0xff); 585 free_irq(nn->irq_entries[vector_idx].vector, nn); 586 } 587 588 /* Transmit 589 * 590 * One queue controller peripheral queue is used for transmit. The 591 * driver en-queues packets for transmit by advancing the write 592 * pointer. The device indicates that packets have transmitted by 593 * advancing the read pointer. The driver maintains a local copy of 594 * the read and write pointer in @struct nfp_net_tx_ring. The driver 595 * keeps @wr_p in sync with the queue controller write pointer and can 596 * determine how many packets have been transmitted by comparing its 597 * copy of the read pointer @rd_p with the read pointer maintained by 598 * the queue controller peripheral. 
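 *
 * A sketch of the arithmetic (the pointers are free-running counters that
 * wrap naturally, and needed_descs stands for whatever the caller must
 * enqueue):
 *
 *	in_flight = tx_ring->wr_p - tx_ring->rd_p;
 *	full      = in_flight >= tx_ring->cnt - needed_descs;
 *
 * which is exactly the check nfp_net_tx_full() below performs.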
 */

/**
 * nfp_net_tx_full() - Check if the TX ring is full
 * @tx_ring: TX ring to check
 * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
 *
 * This function checks, based on the *host copy* of the read/write
 * pointers, whether a given TX ring is full. The real TX queue may have
 * some newly made available slots.
 *
 * Return: True if the ring is full.
 */
static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
{
	return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
}

/* Wrappers for deciding when to stop and restart TX queues */
static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
{
	return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
}

static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
{
	return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
}

/**
 * nfp_net_tx_ring_stop() - stop tx ring
 * @nd_q: netdev queue
 * @tx_ring: driver tx queue structure
 *
 * Safely stop TX ring. Remember that while we are running .start_xmit()
 * someone else may be cleaning the TX ring completions so we need to be
 * extra careful here.
 */
static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
				 struct nfp_net_tx_ring *tx_ring)
{
	netif_tx_stop_queue(nd_q);

	/* We can race with the TX completion out of NAPI so recheck */
	smp_mb();
	if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
		netif_tx_start_queue(nd_q);
}

/**
 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd: Pointer to HW TX descriptor
 * @skb: Pointer to SKB
 *
 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
 */
static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
			   struct nfp_net_tx_buf *txbuf,
			   struct nfp_net_tx_desc *txd, struct sk_buff *skb)
{
	u32 hdrlen;
	u16 mss;

	if (!skb_is_gso(skb))
		return;

	if (!skb->encapsulation) {
		txd->l3_offset = skb_network_offset(skb);
		txd->l4_offset = skb_transport_offset(skb);
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		txd->l3_offset = skb_inner_network_offset(skb);
		txd->l4_offset = skb_inner_transport_offset(skb);
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			inner_tcp_hdrlen(skb);
	}

	txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
	txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);

	mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
	txd->lso_hdrlen = hdrlen;
	txd->mss = cpu_to_le16(mss);
	txd->flags |= PCIE_DESC_TX_LSO;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_lso++;
	u64_stats_update_end(&r_vec->tx_sync);
}

/**
 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
 * @dp: NFP Net data path struct
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd: Pointer to TX descriptor
 * @skb: Pointer to SKB
 *
 * This function sets the TX checksum flags in the TX descriptor based
 * on the configuration and the protocol of the packet to be transmitted.
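 *
 * For example, a non-encapsulated TCP/IPv4 packet with CHECKSUM_PARTIAL ends
 * up with PCIE_DESC_TX_CSUM | PCIE_DESC_TX_IP4_CSUM | PCIE_DESC_TX_TCP_CSUM
 * set in @txd->flags.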
702 */ 703 static void nfp_net_tx_csum(struct nfp_net_dp *dp, 704 struct nfp_net_r_vector *r_vec, 705 struct nfp_net_tx_buf *txbuf, 706 struct nfp_net_tx_desc *txd, struct sk_buff *skb) 707 { 708 struct ipv6hdr *ipv6h; 709 struct iphdr *iph; 710 u8 l4_hdr; 711 712 if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM)) 713 return; 714 715 if (skb->ip_summed != CHECKSUM_PARTIAL) 716 return; 717 718 txd->flags |= PCIE_DESC_TX_CSUM; 719 if (skb->encapsulation) 720 txd->flags |= PCIE_DESC_TX_ENCAP; 721 722 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); 723 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); 724 725 if (iph->version == 4) { 726 txd->flags |= PCIE_DESC_TX_IP4_CSUM; 727 l4_hdr = iph->protocol; 728 } else if (ipv6h->version == 6) { 729 l4_hdr = ipv6h->nexthdr; 730 } else { 731 nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version); 732 return; 733 } 734 735 switch (l4_hdr) { 736 case IPPROTO_TCP: 737 txd->flags |= PCIE_DESC_TX_TCP_CSUM; 738 break; 739 case IPPROTO_UDP: 740 txd->flags |= PCIE_DESC_TX_UDP_CSUM; 741 break; 742 default: 743 nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr); 744 return; 745 } 746 747 u64_stats_update_begin(&r_vec->tx_sync); 748 if (skb->encapsulation) 749 r_vec->hw_csum_tx_inner += txbuf->pkt_cnt; 750 else 751 r_vec->hw_csum_tx += txbuf->pkt_cnt; 752 u64_stats_update_end(&r_vec->tx_sync); 753 } 754 755 static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring) 756 { 757 wmb(); 758 nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add); 759 tx_ring->wr_ptr_add = 0; 760 } 761 762 static int nfp_net_prep_port_id(struct sk_buff *skb) 763 { 764 struct metadata_dst *md_dst = skb_metadata_dst(skb); 765 unsigned char *data; 766 767 if (likely(!md_dst)) 768 return 0; 769 if (unlikely(md_dst->type != METADATA_HW_PORT_MUX)) 770 return 0; 771 772 if (unlikely(skb_cow_head(skb, 8))) 773 return -ENOMEM; 774 775 data = skb_push(skb, 8); 776 put_unaligned_be32(NFP_NET_META_PORTID, data); 777 put_unaligned_be32(md_dst->u.port_info.port_id, data + 4); 778 779 return 8; 780 } 781 782 /** 783 * nfp_net_tx() - Main transmit entry point 784 * @skb: SKB to transmit 785 * @netdev: netdev structure 786 * 787 * Return: NETDEV_TX_OK on success. 788 */ 789 static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) 790 { 791 struct nfp_net *nn = netdev_priv(netdev); 792 const struct skb_frag_struct *frag; 793 struct nfp_net_tx_desc *txd, txdg; 794 int f, nr_frags, wr_idx, md_bytes; 795 struct nfp_net_tx_ring *tx_ring; 796 struct nfp_net_r_vector *r_vec; 797 struct nfp_net_tx_buf *txbuf; 798 struct netdev_queue *nd_q; 799 struct nfp_net_dp *dp; 800 dma_addr_t dma_addr; 801 unsigned int fsize; 802 u16 qidx; 803 804 dp = &nn->dp; 805 qidx = skb_get_queue_mapping(skb); 806 tx_ring = &dp->tx_rings[qidx]; 807 r_vec = tx_ring->r_vec; 808 nd_q = netdev_get_tx_queue(dp->netdev, qidx); 809 810 nr_frags = skb_shinfo(skb)->nr_frags; 811 812 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) { 813 nn_dp_warn(dp, "TX ring %d busy. 
wrp=%u rdp=%u\n", 814 qidx, tx_ring->wr_p, tx_ring->rd_p); 815 netif_tx_stop_queue(nd_q); 816 nfp_net_tx_xmit_more_flush(tx_ring); 817 u64_stats_update_begin(&r_vec->tx_sync); 818 r_vec->tx_busy++; 819 u64_stats_update_end(&r_vec->tx_sync); 820 return NETDEV_TX_BUSY; 821 } 822 823 md_bytes = nfp_net_prep_port_id(skb); 824 if (unlikely(md_bytes < 0)) { 825 nfp_net_tx_xmit_more_flush(tx_ring); 826 dev_kfree_skb_any(skb); 827 return NETDEV_TX_OK; 828 } 829 830 /* Start with the head skbuf */ 831 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb), 832 DMA_TO_DEVICE); 833 if (dma_mapping_error(dp->dev, dma_addr)) 834 goto err_free; 835 836 wr_idx = D_IDX(tx_ring, tx_ring->wr_p); 837 838 /* Stash the soft descriptor of the head then initialize it */ 839 txbuf = &tx_ring->txbufs[wr_idx]; 840 txbuf->skb = skb; 841 txbuf->dma_addr = dma_addr; 842 txbuf->fidx = -1; 843 txbuf->pkt_cnt = 1; 844 txbuf->real_len = skb->len; 845 846 /* Build TX descriptor */ 847 txd = &tx_ring->txds[wr_idx]; 848 txd->offset_eop = (nr_frags ? 0 : PCIE_DESC_TX_EOP) | md_bytes; 849 txd->dma_len = cpu_to_le16(skb_headlen(skb)); 850 nfp_desc_set_dma_addr(txd, dma_addr); 851 txd->data_len = cpu_to_le16(skb->len); 852 853 txd->flags = 0; 854 txd->mss = 0; 855 txd->lso_hdrlen = 0; 856 857 /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */ 858 nfp_net_tx_tso(r_vec, txbuf, txd, skb); 859 nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb); 860 if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) { 861 txd->flags |= PCIE_DESC_TX_VLAN; 862 txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb)); 863 } 864 865 /* Gather DMA */ 866 if (nr_frags > 0) { 867 /* all descs must match except for in addr, length and eop */ 868 txdg = *txd; 869 870 for (f = 0; f < nr_frags; f++) { 871 frag = &skb_shinfo(skb)->frags[f]; 872 fsize = skb_frag_size(frag); 873 874 dma_addr = skb_frag_dma_map(dp->dev, frag, 0, 875 fsize, DMA_TO_DEVICE); 876 if (dma_mapping_error(dp->dev, dma_addr)) 877 goto err_unmap; 878 879 wr_idx = D_IDX(tx_ring, wr_idx + 1); 880 tx_ring->txbufs[wr_idx].skb = skb; 881 tx_ring->txbufs[wr_idx].dma_addr = dma_addr; 882 tx_ring->txbufs[wr_idx].fidx = f; 883 884 txd = &tx_ring->txds[wr_idx]; 885 *txd = txdg; 886 txd->dma_len = cpu_to_le16(fsize); 887 nfp_desc_set_dma_addr(txd, dma_addr); 888 txd->offset_eop |= 889 (f == nr_frags - 1) ? 
				PCIE_DESC_TX_EOP : 0;
		}

		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_gather++;
		u64_stats_update_end(&r_vec->tx_sync);
	}

	netdev_tx_sent_queue(nd_q, txbuf->real_len);

	skb_tx_timestamp(skb);

	tx_ring->wr_p += nr_frags + 1;
	if (nfp_net_tx_ring_should_stop(tx_ring))
		nfp_net_tx_ring_stop(nd_q, tx_ring);

	tx_ring->wr_ptr_add += nr_frags + 1;
	if (!skb->xmit_more || netif_xmit_stopped(nd_q))
		nfp_net_tx_xmit_more_flush(tx_ring);

	return NETDEV_TX_OK;

err_unmap:
	while (--f >= 0) {
		frag = &skb_shinfo(skb)->frags[f];
		dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
			       skb_frag_size(frag), DMA_TO_DEVICE);
		tx_ring->txbufs[wr_idx].skb = NULL;
		tx_ring->txbufs[wr_idx].dma_addr = 0;
		tx_ring->txbufs[wr_idx].fidx = -2;
		wr_idx = wr_idx - 1;
		if (wr_idx < 0)
			wr_idx += tx_ring->cnt;
	}
	dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);
	tx_ring->txbufs[wr_idx].skb = NULL;
	tx_ring->txbufs[wr_idx].dma_addr = 0;
	tx_ring->txbufs[wr_idx].fidx = -2;
err_free:
	nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
	nfp_net_tx_xmit_more_flush(tx_ring);
	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_errors++;
	u64_stats_update_end(&r_vec->tx_sync);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * nfp_net_tx_complete() - Handle completed TX packets
 * @tx_ring: TX ring structure
 */
static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	const struct skb_frag_struct *frag;
	struct netdev_queue *nd_q;
	u32 done_pkts = 0, done_bytes = 0;
	struct sk_buff *skb;
	int todo, nr_frags;
	u32 qcp_rd_p;
	int fidx;
	int idx;

	if (tx_ring->wr_p == tx_ring->rd_p)
		return;

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return;

	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

	while (todo--) {
		idx = D_IDX(tx_ring, tx_ring->rd_p++);

		skb = tx_ring->txbufs[idx].skb;
		if (!skb)
			continue;

		nr_frags = skb_shinfo(skb)->nr_frags;
		fidx = tx_ring->txbufs[idx].fidx;

		if (fidx == -1) {
			/* unmap head */
			dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
					 skb_headlen(skb), DMA_TO_DEVICE);

			done_pkts += tx_ring->txbufs[idx].pkt_cnt;
			done_bytes += tx_ring->txbufs[idx].real_len;
		} else {
			/* unmap fragment */
			frag = &skb_shinfo(skb)->frags[fidx];
			dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}

		/* check for last gather fragment */
		if (fidx == nr_frags - 1)
			dev_consume_skb_any(skb);

		tx_ring->txbufs[idx].dma_addr = 0;
		tx_ring->txbufs[idx].skb = NULL;
		tx_ring->txbufs[idx].fidx = -2;
	}

	tx_ring->qcp_rd_p = qcp_rd_p;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	if (!dp->netdev)
		return;

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
	if (nfp_net_tx_ring_should_wake(tx_ring)) {
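		/* This pairs with the smp_mb() in nfp_net_tx_ring_stop(): either
		 * the xmit path observes the advanced rd_p and keeps the queue
		 * running, or we observe the stopped queue and wake it below.
		 */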
/* Make sure TX thread will see updated tx_ring->rd_p */ 1015 smp_mb(); 1016 1017 if (unlikely(netif_tx_queue_stopped(nd_q))) 1018 netif_tx_wake_queue(nd_q); 1019 } 1020 1021 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt, 1022 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n", 1023 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt); 1024 } 1025 1026 static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring) 1027 { 1028 struct nfp_net_r_vector *r_vec = tx_ring->r_vec; 1029 u32 done_pkts = 0, done_bytes = 0; 1030 bool done_all; 1031 int idx, todo; 1032 u32 qcp_rd_p; 1033 1034 /* Work out how many descriptors have been transmitted */ 1035 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); 1036 1037 if (qcp_rd_p == tx_ring->qcp_rd_p) 1038 return true; 1039 1040 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p); 1041 1042 done_all = todo <= NFP_NET_XDP_MAX_COMPLETE; 1043 todo = min(todo, NFP_NET_XDP_MAX_COMPLETE); 1044 1045 tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo); 1046 1047 done_pkts = todo; 1048 while (todo--) { 1049 idx = D_IDX(tx_ring, tx_ring->rd_p); 1050 tx_ring->rd_p++; 1051 1052 done_bytes += tx_ring->txbufs[idx].real_len; 1053 } 1054 1055 u64_stats_update_begin(&r_vec->tx_sync); 1056 r_vec->tx_bytes += done_bytes; 1057 r_vec->tx_pkts += done_pkts; 1058 u64_stats_update_end(&r_vec->tx_sync); 1059 1060 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt, 1061 "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n", 1062 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt); 1063 1064 return done_all; 1065 } 1066 1067 /** 1068 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers 1069 * @dp: NFP Net data path struct 1070 * @tx_ring: TX ring structure 1071 * 1072 * Assumes that the device is stopped 1073 */ 1074 static void 1075 nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) 1076 { 1077 const struct skb_frag_struct *frag; 1078 struct netdev_queue *nd_q; 1079 1080 while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) { 1081 struct nfp_net_tx_buf *tx_buf; 1082 struct sk_buff *skb; 1083 int idx, nr_frags; 1084 1085 idx = D_IDX(tx_ring, tx_ring->rd_p); 1086 tx_buf = &tx_ring->txbufs[idx]; 1087 1088 skb = tx_ring->txbufs[idx].skb; 1089 nr_frags = skb_shinfo(skb)->nr_frags; 1090 1091 if (tx_buf->fidx == -1) { 1092 /* unmap head */ 1093 dma_unmap_single(dp->dev, tx_buf->dma_addr, 1094 skb_headlen(skb), DMA_TO_DEVICE); 1095 } else { 1096 /* unmap fragment */ 1097 frag = &skb_shinfo(skb)->frags[tx_buf->fidx]; 1098 dma_unmap_page(dp->dev, tx_buf->dma_addr, 1099 skb_frag_size(frag), DMA_TO_DEVICE); 1100 } 1101 1102 /* check for last gather fragment */ 1103 if (tx_buf->fidx == nr_frags - 1) 1104 dev_kfree_skb_any(skb); 1105 1106 tx_buf->dma_addr = 0; 1107 tx_buf->skb = NULL; 1108 tx_buf->fidx = -2; 1109 1110 tx_ring->qcp_rd_p++; 1111 tx_ring->rd_p++; 1112 } 1113 1114 memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt); 1115 tx_ring->wr_p = 0; 1116 tx_ring->rd_p = 0; 1117 tx_ring->qcp_rd_p = 0; 1118 tx_ring->wr_ptr_add = 0; 1119 1120 if (tx_ring->is_xdp || !dp->netdev) 1121 return; 1122 1123 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx); 1124 netdev_tx_reset_queue(nd_q); 1125 } 1126 1127 static void nfp_net_tx_timeout(struct net_device *netdev) 1128 { 1129 struct nfp_net *nn = netdev_priv(netdev); 1130 int i; 1131 1132 for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) { 1133 if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i))) 1134 continue; 1135 nn_warn(nn, "TX timeout on ring: %d\n", i); 1136 } 
	nn_warn(nn, "TX watchdog timeout\n");
}

/* Receive processing
 */
static unsigned int
nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
{
	unsigned int fl_bufsz;

	fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
	fl_bufsz += dp->rx_dma_off;
	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		fl_bufsz += NFP_NET_MAX_PREPEND;
	else
		fl_bufsz += dp->rx_offset;
	fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;

	fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
	fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return fl_bufsz;
}

static void
nfp_net_free_frag(void *frag, bool xdp)
{
	if (!xdp)
		skb_free_frag(frag);
	else
		__free_page(virt_to_page(frag));
}

/**
 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
 * @dp: NFP Net data path struct
 * @dma_addr: Pointer to storage for DMA address (output param)
 *
 * This function will allocate a new page frag and map it for DMA.
 *
 * Return: allocated page frag or NULL on failure.
 */
static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

	if (!dp->xdp_prog) {
		frag = netdev_alloc_frag(dp->fl_bufsz);
	} else {
		struct page *page;

		page = alloc_page(GFP_KERNEL);
		frag = page ? page_address(page) : NULL;
	}
	if (!frag) {
		nn_dp_warn(dp, "Failed to alloc receive page frag\n");
		return NULL;
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}

static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

	if (!dp->xdp_prog) {
		frag = napi_alloc_frag(dp->fl_bufsz);
		if (unlikely(!frag))
			return NULL;
	} else {
		struct page *page;

		page = dev_alloc_page();
		if (unlikely(!page))
			return NULL;
		frag = page_address(page);
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}

/**
 * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
 * @dp: NFP Net data path struct
 * @rx_ring: RX ring structure
 * @frag: page fragment buffer
 * @dma_addr: DMA address of skb mapping
 */
static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
				struct nfp_net_rx_ring *rx_ring,
				void *frag, dma_addr_t dma_addr)
{
	unsigned int wr_idx;

	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);

	nfp_net_dma_sync_dev_rx(dp, dma_addr);

	/* Stash SKB and DMA address away */
	rx_ring->rxbufs[wr_idx].frag = frag;
	rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;

	/* Fill freelist descriptor */
	rx_ring->rxds[wr_idx].fld.reserved = 0;
	rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
	nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
			      dma_addr + dp->rx_dma_off);

	rx_ring->wr_p++;
	if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
		/* Update write pointer of the freelist queue. Make
		 * sure all writes are flushed before telling the hardware.
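		 * The QCP pointer is only advanced once every NFP_NET_FL_BATCH
		 * buffers, so the device sees freelist credits in batches rather
		 * than one descriptor at a time.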
1264 */ 1265 wmb(); 1266 nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH); 1267 } 1268 } 1269 1270 /** 1271 * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable 1272 * @rx_ring: RX ring structure 1273 * 1274 * Warning: Do *not* call if ring buffers were never put on the FW freelist 1275 * (i.e. device was not enabled)! 1276 */ 1277 static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring) 1278 { 1279 unsigned int wr_idx, last_idx; 1280 1281 /* Move the empty entry to the end of the list */ 1282 wr_idx = D_IDX(rx_ring, rx_ring->wr_p); 1283 last_idx = rx_ring->cnt - 1; 1284 rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr; 1285 rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag; 1286 rx_ring->rxbufs[last_idx].dma_addr = 0; 1287 rx_ring->rxbufs[last_idx].frag = NULL; 1288 1289 memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt); 1290 rx_ring->wr_p = 0; 1291 rx_ring->rd_p = 0; 1292 } 1293 1294 /** 1295 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring 1296 * @dp: NFP Net data path struct 1297 * @rx_ring: RX ring to remove buffers from 1298 * 1299 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1) 1300 * entries. After device is disabled nfp_net_rx_ring_reset() must be called 1301 * to restore required ring geometry. 1302 */ 1303 static void 1304 nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp, 1305 struct nfp_net_rx_ring *rx_ring) 1306 { 1307 unsigned int i; 1308 1309 for (i = 0; i < rx_ring->cnt - 1; i++) { 1310 /* NULL skb can only happen when initial filling of the ring 1311 * fails to allocate enough buffers and calls here to free 1312 * already allocated ones. 1313 */ 1314 if (!rx_ring->rxbufs[i].frag) 1315 continue; 1316 1317 nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr); 1318 nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog); 1319 rx_ring->rxbufs[i].dma_addr = 0; 1320 rx_ring->rxbufs[i].frag = NULL; 1321 } 1322 } 1323 1324 /** 1325 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW) 1326 * @dp: NFP Net data path struct 1327 * @rx_ring: RX ring to remove buffers from 1328 */ 1329 static int 1330 nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp, 1331 struct nfp_net_rx_ring *rx_ring) 1332 { 1333 struct nfp_net_rx_buf *rxbufs; 1334 unsigned int i; 1335 1336 rxbufs = rx_ring->rxbufs; 1337 1338 for (i = 0; i < rx_ring->cnt - 1; i++) { 1339 rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr); 1340 if (!rxbufs[i].frag) { 1341 nfp_net_rx_ring_bufs_free(dp, rx_ring); 1342 return -ENOMEM; 1343 } 1344 } 1345 1346 return 0; 1347 } 1348 1349 /** 1350 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW 1351 * @dp: NFP Net data path struct 1352 * @rx_ring: RX ring to fill 1353 */ 1354 static void 1355 nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp, 1356 struct nfp_net_rx_ring *rx_ring) 1357 { 1358 unsigned int i; 1359 1360 for (i = 0; i < rx_ring->cnt - 1; i++) 1361 nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag, 1362 rx_ring->rxbufs[i].dma_addr); 1363 } 1364 1365 /** 1366 * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors 1367 * @flags: RX descriptor flags field in CPU byte order 1368 */ 1369 static int nfp_net_rx_csum_has_errors(u16 flags) 1370 { 1371 u16 csum_all_checked, csum_all_ok; 1372 1373 csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL; 1374 csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK; 1375 1376 return csum_all_checked != (csum_all_ok << 
PCIE_DESC_RX_CSUM_OK_SHIFT); 1377 } 1378 1379 /** 1380 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags 1381 * @dp: NFP Net data path struct 1382 * @r_vec: per-ring structure 1383 * @rxd: Pointer to RX descriptor 1384 * @meta: Parsed metadata prepend 1385 * @skb: Pointer to SKB 1386 */ 1387 static void nfp_net_rx_csum(struct nfp_net_dp *dp, 1388 struct nfp_net_r_vector *r_vec, 1389 struct nfp_net_rx_desc *rxd, 1390 struct nfp_meta_parsed *meta, struct sk_buff *skb) 1391 { 1392 skb_checksum_none_assert(skb); 1393 1394 if (!(dp->netdev->features & NETIF_F_RXCSUM)) 1395 return; 1396 1397 if (meta->csum_type) { 1398 skb->ip_summed = meta->csum_type; 1399 skb->csum = meta->csum; 1400 u64_stats_update_begin(&r_vec->rx_sync); 1401 r_vec->hw_csum_rx_ok++; 1402 u64_stats_update_end(&r_vec->rx_sync); 1403 return; 1404 } 1405 1406 if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) { 1407 u64_stats_update_begin(&r_vec->rx_sync); 1408 r_vec->hw_csum_rx_error++; 1409 u64_stats_update_end(&r_vec->rx_sync); 1410 return; 1411 } 1412 1413 /* Assume that the firmware will never report inner CSUM_OK unless outer 1414 * L4 headers were successfully parsed. FW will always report zero UDP 1415 * checksum as CSUM_OK. 1416 */ 1417 if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK || 1418 rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) { 1419 __skb_incr_checksum_unnecessary(skb); 1420 u64_stats_update_begin(&r_vec->rx_sync); 1421 r_vec->hw_csum_rx_ok++; 1422 u64_stats_update_end(&r_vec->rx_sync); 1423 } 1424 1425 if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK || 1426 rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) { 1427 __skb_incr_checksum_unnecessary(skb); 1428 u64_stats_update_begin(&r_vec->rx_sync); 1429 r_vec->hw_csum_rx_inner_ok++; 1430 u64_stats_update_end(&r_vec->rx_sync); 1431 } 1432 } 1433 1434 static void 1435 nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta, 1436 unsigned int type, __be32 *hash) 1437 { 1438 if (!(netdev->features & NETIF_F_RXHASH)) 1439 return; 1440 1441 switch (type) { 1442 case NFP_NET_RSS_IPV4: 1443 case NFP_NET_RSS_IPV6: 1444 case NFP_NET_RSS_IPV6_EX: 1445 meta->hash_type = PKT_HASH_TYPE_L3; 1446 break; 1447 default: 1448 meta->hash_type = PKT_HASH_TYPE_L4; 1449 break; 1450 } 1451 1452 meta->hash = get_unaligned_be32(hash); 1453 } 1454 1455 static void 1456 nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta, 1457 void *data, struct nfp_net_rx_desc *rxd) 1458 { 1459 struct nfp_net_rx_hash *rx_hash = data; 1460 1461 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS)) 1462 return; 1463 1464 nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type), 1465 &rx_hash->hash); 1466 } 1467 1468 static void * 1469 nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta, 1470 void *data, int meta_len) 1471 { 1472 u32 meta_info; 1473 1474 meta_info = get_unaligned_be32(data); 1475 data += 4; 1476 1477 while (meta_info) { 1478 switch (meta_info & NFP_NET_META_FIELD_MASK) { 1479 case NFP_NET_META_HASH: 1480 meta_info >>= NFP_NET_META_FIELD_SIZE; 1481 nfp_net_set_hash(netdev, meta, 1482 meta_info & NFP_NET_META_FIELD_MASK, 1483 (__be32 *)data); 1484 data += 4; 1485 break; 1486 case NFP_NET_META_MARK: 1487 meta->mark = get_unaligned_be32(data); 1488 data += 4; 1489 break; 1490 case NFP_NET_META_PORTID: 1491 meta->portid = get_unaligned_be32(data); 1492 data += 4; 1493 break; 1494 case NFP_NET_META_CSUM: 1495 meta->csum_type = CHECKSUM_COMPLETE; 1496 meta->csum = 1497 (__force 
				(__force __wsum)__get_unaligned_cpu32(data);
			data += 4;
			break;
		default:
			return NULL;
		}

		meta_info >>= NFP_NET_META_FIELD_SIZE;
	}

	return data;
}

static void
nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
		struct sk_buff *skb)
{
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_drops++;
	/* If we have both skb and rxbuf the replacement buffer allocation
	 * must have failed, count this as an alloc failure.
	 */
	if (skb && rxbuf)
		r_vec->rx_replace_buf_alloc_fail++;
	u64_stats_update_end(&r_vec->rx_sync);

	/* The skb is built around the frag; freeing the skb would also free
	 * the frag, so take an extra reference to be able to reuse the frag.
	 */
	if (skb && rxbuf && skb->head == rxbuf->frag)
		page_ref_inc(virt_to_head_page(rxbuf->frag));
	if (rxbuf)
		nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
	if (skb)
		dev_kfree_skb_any(skb);
}

static bool
nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
		   struct nfp_net_tx_ring *tx_ring,
		   struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
		   unsigned int pkt_len, bool *completed)
{
	struct nfp_net_tx_buf *txbuf;
	struct nfp_net_tx_desc *txd;
	int wr_idx;

	if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
		if (!*completed) {
			nfp_net_xdp_complete(tx_ring);
			*completed = true;
		}

		if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
			nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
					NULL);
			return false;
		}
	}

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

	/* Stash the soft descriptor of the head then initialize it */
	txbuf = &tx_ring->txbufs[wr_idx];

	nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);

	txbuf->frag = rxbuf->frag;
	txbuf->dma_addr = rxbuf->dma_addr;
	txbuf->fidx = -1;
	txbuf->pkt_cnt = 1;
	txbuf->real_len = pkt_len;

	dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
				   pkt_len, DMA_BIDIRECTIONAL);

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = PCIE_DESC_TX_EOP;
	txd->dma_len = cpu_to_le16(pkt_len);
	nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
	txd->data_len = cpu_to_le16(pkt_len);

	txd->flags = 0;
	txd->mss = 0;
	txd->lso_hdrlen = 0;

	tx_ring->wr_p++;
	tx_ring->wr_ptr_add++;
	return true;
}

/**
 * nfp_net_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring: RX ring to receive from
 * @budget: NAPI budget
 *
 * Note, this function is separated out from the napi poll function to
 * more cleanly separate packet receive code from other bookkeeping
 * functions performed in the napi poll function.
 *
 * Return: Number of packets received.
 */
static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct nfp_net_tx_ring *tx_ring;
	struct bpf_prog *xdp_prog;
	bool xdp_tx_cmpl = false;
	unsigned int true_bufsz;
	struct sk_buff *skb;
	int pkts_polled = 0;
	struct xdp_buff xdp;
	int idx;

	rcu_read_lock();
	xdp_prog = READ_ONCE(dp->xdp_prog);
	true_bufsz = xdp_prog ?
PAGE_SIZE : dp->fl_bufsz; 1617 xdp.rxq = &rx_ring->xdp_rxq; 1618 tx_ring = r_vec->xdp_ring; 1619 1620 while (pkts_polled < budget) { 1621 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off; 1622 struct nfp_net_rx_buf *rxbuf; 1623 struct nfp_net_rx_desc *rxd; 1624 struct nfp_meta_parsed meta; 1625 struct net_device *netdev; 1626 dma_addr_t new_dma_addr; 1627 u32 meta_len_xdp = 0; 1628 void *new_frag; 1629 1630 idx = D_IDX(rx_ring, rx_ring->rd_p); 1631 1632 rxd = &rx_ring->rxds[idx]; 1633 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) 1634 break; 1635 1636 /* Memory barrier to ensure that we won't do other reads 1637 * before the DD bit. 1638 */ 1639 dma_rmb(); 1640 1641 memset(&meta, 0, sizeof(meta)); 1642 1643 rx_ring->rd_p++; 1644 pkts_polled++; 1645 1646 rxbuf = &rx_ring->rxbufs[idx]; 1647 /* < meta_len > 1648 * <-- [rx_offset] --> 1649 * --------------------------------------------------------- 1650 * | [XX] | metadata | packet | XXXX | 1651 * --------------------------------------------------------- 1652 * <---------------- data_len ---------------> 1653 * 1654 * The rx_offset is fixed for all packets, the meta_len can vary 1655 * on a packet by packet basis. If rx_offset is set to zero 1656 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the 1657 * buffer and is immediately followed by the packet (no [XX]). 1658 */ 1659 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK; 1660 data_len = le16_to_cpu(rxd->rxd.data_len); 1661 pkt_len = data_len - meta_len; 1662 1663 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off; 1664 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) 1665 pkt_off += meta_len; 1666 else 1667 pkt_off += dp->rx_offset; 1668 meta_off = pkt_off - meta_len; 1669 1670 /* Stats update */ 1671 u64_stats_update_begin(&r_vec->rx_sync); 1672 r_vec->rx_pkts++; 1673 r_vec->rx_bytes += pkt_len; 1674 u64_stats_update_end(&r_vec->rx_sync); 1675 1676 if (unlikely(meta_len > NFP_NET_MAX_PREPEND || 1677 (dp->rx_offset && meta_len > dp->rx_offset))) { 1678 nn_dp_warn(dp, "oversized RX packet metadata %u\n", 1679 meta_len); 1680 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); 1681 continue; 1682 } 1683 1684 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, 1685 data_len); 1686 1687 if (!dp->chained_metadata_format) { 1688 nfp_net_set_hash_desc(dp->netdev, &meta, 1689 rxbuf->frag + meta_off, rxd); 1690 } else if (meta_len) { 1691 void *end; 1692 1693 end = nfp_net_parse_meta(dp->netdev, &meta, 1694 rxbuf->frag + meta_off, 1695 meta_len); 1696 if (unlikely(end != rxbuf->frag + pkt_off)) { 1697 nn_dp_warn(dp, "invalid RX packet metadata\n"); 1698 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, 1699 NULL); 1700 continue; 1701 } 1702 } 1703 1704 if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF && 1705 dp->bpf_offload_xdp) && !meta.portid) { 1706 void *orig_data = rxbuf->frag + pkt_off; 1707 unsigned int dma_off; 1708 int act; 1709 1710 xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM; 1711 xdp.data = orig_data; 1712 xdp.data_meta = orig_data; 1713 xdp.data_end = orig_data + pkt_len; 1714 1715 act = bpf_prog_run_xdp(xdp_prog, &xdp); 1716 1717 pkt_len -= xdp.data - orig_data; 1718 pkt_off += xdp.data - orig_data; 1719 1720 switch (act) { 1721 case XDP_PASS: 1722 meta_len_xdp = xdp.data - xdp.data_meta; 1723 break; 1724 case XDP_TX: 1725 dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM; 1726 if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring, 1727 tx_ring, rxbuf, 1728 dma_off, 1729 pkt_len, 1730 &xdp_tx_cmpl))) 1731 trace_xdp_exception(dp->netdev, 1732 
xdp_prog, act); 1733 continue; 1734 default: 1735 bpf_warn_invalid_xdp_action(act); 1736 /* fall through */ 1737 case XDP_ABORTED: 1738 trace_xdp_exception(dp->netdev, xdp_prog, act); 1739 /* fall through */ 1740 case XDP_DROP: 1741 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, 1742 rxbuf->dma_addr); 1743 continue; 1744 } 1745 } 1746 1747 skb = build_skb(rxbuf->frag, true_bufsz); 1748 if (unlikely(!skb)) { 1749 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); 1750 continue; 1751 } 1752 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr); 1753 if (unlikely(!new_frag)) { 1754 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb); 1755 continue; 1756 } 1757 1758 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); 1759 1760 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); 1761 1762 if (likely(!meta.portid)) { 1763 netdev = dp->netdev; 1764 } else { 1765 struct nfp_net *nn; 1766 1767 nn = netdev_priv(dp->netdev); 1768 netdev = nfp_app_repr_get(nn->app, meta.portid); 1769 if (unlikely(!netdev)) { 1770 nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb); 1771 continue; 1772 } 1773 nfp_repr_inc_rx_stats(netdev, pkt_len); 1774 } 1775 1776 skb_reserve(skb, pkt_off); 1777 skb_put(skb, pkt_len); 1778 1779 skb->mark = meta.mark; 1780 skb_set_hash(skb, meta.hash, meta.hash_type); 1781 1782 skb_record_rx_queue(skb, rx_ring->idx); 1783 skb->protocol = eth_type_trans(skb, netdev); 1784 1785 nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb); 1786 1787 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) 1788 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 1789 le16_to_cpu(rxd->rxd.vlan)); 1790 if (meta_len_xdp) 1791 skb_metadata_set(skb, meta_len_xdp); 1792 1793 napi_gro_receive(&rx_ring->r_vec->napi, skb); 1794 } 1795 1796 if (xdp_prog) { 1797 if (tx_ring->wr_ptr_add) 1798 nfp_net_tx_xmit_more_flush(tx_ring); 1799 else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) && 1800 !xdp_tx_cmpl) 1801 if (!nfp_net_xdp_complete(tx_ring)) 1802 pkts_polled = budget; 1803 } 1804 rcu_read_unlock(); 1805 1806 return pkts_polled; 1807 } 1808 1809 /** 1810 * nfp_net_poll() - napi poll function 1811 * @napi: NAPI structure 1812 * @budget: NAPI budget 1813 * 1814 * Return: number of packets polled. 
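 * The vector's interrupt is unmasked again (via nfp_net_irq_unmask()) only
 * when fewer than @budget packets were processed and napi_complete_done()
 * accepted the completion; otherwise NAPI will poll again.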
1815 */ 1816 static int nfp_net_poll(struct napi_struct *napi, int budget) 1817 { 1818 struct nfp_net_r_vector *r_vec = 1819 container_of(napi, struct nfp_net_r_vector, napi); 1820 unsigned int pkts_polled = 0; 1821 1822 if (r_vec->tx_ring) 1823 nfp_net_tx_complete(r_vec->tx_ring); 1824 if (r_vec->rx_ring) 1825 pkts_polled = nfp_net_rx(r_vec->rx_ring, budget); 1826 1827 if (pkts_polled < budget) 1828 if (napi_complete_done(napi, pkts_polled)) 1829 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); 1830 1831 return pkts_polled; 1832 } 1833 1834 /* Control device data path 1835 */ 1836 1837 static bool 1838 nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, 1839 struct sk_buff *skb, bool old) 1840 { 1841 unsigned int real_len = skb->len, meta_len = 0; 1842 struct nfp_net_tx_ring *tx_ring; 1843 struct nfp_net_tx_buf *txbuf; 1844 struct nfp_net_tx_desc *txd; 1845 struct nfp_net_dp *dp; 1846 dma_addr_t dma_addr; 1847 int wr_idx; 1848 1849 dp = &r_vec->nfp_net->dp; 1850 tx_ring = r_vec->tx_ring; 1851 1852 if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) { 1853 nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n"); 1854 goto err_free; 1855 } 1856 1857 if (unlikely(nfp_net_tx_full(tx_ring, 1))) { 1858 u64_stats_update_begin(&r_vec->tx_sync); 1859 r_vec->tx_busy++; 1860 u64_stats_update_end(&r_vec->tx_sync); 1861 if (!old) 1862 __skb_queue_tail(&r_vec->queue, skb); 1863 else 1864 __skb_queue_head(&r_vec->queue, skb); 1865 return true; 1866 } 1867 1868 if (nfp_app_ctrl_has_meta(nn->app)) { 1869 if (unlikely(skb_headroom(skb) < 8)) { 1870 nn_dp_warn(dp, "CTRL TX on skb without headroom\n"); 1871 goto err_free; 1872 } 1873 meta_len = 8; 1874 put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4)); 1875 put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4)); 1876 } 1877 1878 /* Start with the head skbuf */ 1879 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb), 1880 DMA_TO_DEVICE); 1881 if (dma_mapping_error(dp->dev, dma_addr)) 1882 goto err_dma_warn; 1883 1884 wr_idx = D_IDX(tx_ring, tx_ring->wr_p); 1885 1886 /* Stash the soft descriptor of the head then initialize it */ 1887 txbuf = &tx_ring->txbufs[wr_idx]; 1888 txbuf->skb = skb; 1889 txbuf->dma_addr = dma_addr; 1890 txbuf->fidx = -1; 1891 txbuf->pkt_cnt = 1; 1892 txbuf->real_len = real_len; 1893 1894 /* Build TX descriptor */ 1895 txd = &tx_ring->txds[wr_idx]; 1896 txd->offset_eop = meta_len | PCIE_DESC_TX_EOP; 1897 txd->dma_len = cpu_to_le16(skb_headlen(skb)); 1898 nfp_desc_set_dma_addr(txd, dma_addr); 1899 txd->data_len = cpu_to_le16(skb->len); 1900 1901 txd->flags = 0; 1902 txd->mss = 0; 1903 txd->lso_hdrlen = 0; 1904 1905 tx_ring->wr_p++; 1906 tx_ring->wr_ptr_add++; 1907 nfp_net_tx_xmit_more_flush(tx_ring); 1908 1909 return false; 1910 1911 err_dma_warn: 1912 nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n"); 1913 err_free: 1914 u64_stats_update_begin(&r_vec->tx_sync); 1915 r_vec->tx_errors++; 1916 u64_stats_update_end(&r_vec->tx_sync); 1917 dev_kfree_skb_any(skb); 1918 return false; 1919 } 1920 1921 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb) 1922 { 1923 struct nfp_net_r_vector *r_vec = &nn->r_vecs[0]; 1924 bool ret; 1925 1926 spin_lock_bh(&r_vec->lock); 1927 ret = nfp_ctrl_tx_one(nn, r_vec, skb, false); 1928 spin_unlock_bh(&r_vec->lock); 1929 1930 return ret; 1931 } 1932 1933 static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec) 1934 { 1935 struct sk_buff *skb; 1936 1937 while ((skb = __skb_dequeue(&r_vec->queue))) 1938 if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, 
true)) 1939 return; 1940 } 1941 1942 static bool 1943 nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len) 1944 { 1945 u32 meta_type, meta_tag; 1946 1947 if (!nfp_app_ctrl_has_meta(nn->app)) 1948 return !meta_len; 1949 1950 if (meta_len != 8) 1951 return false; 1952 1953 meta_type = get_unaligned_be32(data); 1954 meta_tag = get_unaligned_be32(data + 4); 1955 1956 return (meta_type == NFP_NET_META_PORTID && 1957 meta_tag == NFP_META_PORT_ID_CTRL); 1958 } 1959 1960 static bool 1961 nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp, 1962 struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring) 1963 { 1964 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off; 1965 struct nfp_net_rx_buf *rxbuf; 1966 struct nfp_net_rx_desc *rxd; 1967 dma_addr_t new_dma_addr; 1968 struct sk_buff *skb; 1969 void *new_frag; 1970 int idx; 1971 1972 idx = D_IDX(rx_ring, rx_ring->rd_p); 1973 1974 rxd = &rx_ring->rxds[idx]; 1975 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) 1976 return false; 1977 1978 /* Memory barrier to ensure that we won't do other reads 1979 * before the DD bit. 1980 */ 1981 dma_rmb(); 1982 1983 rx_ring->rd_p++; 1984 1985 rxbuf = &rx_ring->rxbufs[idx]; 1986 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK; 1987 data_len = le16_to_cpu(rxd->rxd.data_len); 1988 pkt_len = data_len - meta_len; 1989 1990 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off; 1991 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) 1992 pkt_off += meta_len; 1993 else 1994 pkt_off += dp->rx_offset; 1995 meta_off = pkt_off - meta_len; 1996 1997 /* Stats update */ 1998 u64_stats_update_begin(&r_vec->rx_sync); 1999 r_vec->rx_pkts++; 2000 r_vec->rx_bytes += pkt_len; 2001 u64_stats_update_end(&r_vec->rx_sync); 2002 2003 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len); 2004 2005 if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) { 2006 nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n", 2007 meta_len); 2008 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); 2009 return true; 2010 } 2011 2012 skb = build_skb(rxbuf->frag, dp->fl_bufsz); 2013 if (unlikely(!skb)) { 2014 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL); 2015 return true; 2016 } 2017 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr); 2018 if (unlikely(!new_frag)) { 2019 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb); 2020 return true; 2021 } 2022 2023 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); 2024 2025 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); 2026 2027 skb_reserve(skb, pkt_off); 2028 skb_put(skb, pkt_len); 2029 2030 nfp_app_ctrl_rx(nn->app, skb); 2031 2032 return true; 2033 } 2034 2035 static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec) 2036 { 2037 struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring; 2038 struct nfp_net *nn = r_vec->nfp_net; 2039 struct nfp_net_dp *dp = &nn->dp; 2040 2041 while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring)) 2042 continue; 2043 } 2044 2045 static void nfp_ctrl_poll(unsigned long arg) 2046 { 2047 struct nfp_net_r_vector *r_vec = (void *)arg; 2048 2049 spin_lock_bh(&r_vec->lock); 2050 nfp_net_tx_complete(r_vec->tx_ring); 2051 __nfp_ctrl_tx_queued(r_vec); 2052 spin_unlock_bh(&r_vec->lock); 2053 2054 nfp_ctrl_rx(r_vec); 2055 2056 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); 2057 } 2058 2059 /* Setup and Configuration 2060 */ 2061 2062 /** 2063 * nfp_net_vecs_init() - Assign IRQs and setup rvecs. 
2064 * @nn: NFP Network structure 2065 */ 2066 static void nfp_net_vecs_init(struct nfp_net *nn) 2067 { 2068 struct nfp_net_r_vector *r_vec; 2069 int r; 2070 2071 nn->lsc_handler = nfp_net_irq_lsc; 2072 nn->exn_handler = nfp_net_irq_exn; 2073 2074 for (r = 0; r < nn->max_r_vecs; r++) { 2075 struct msix_entry *entry; 2076 2077 entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r]; 2078 2079 r_vec = &nn->r_vecs[r]; 2080 r_vec->nfp_net = nn; 2081 r_vec->irq_entry = entry->entry; 2082 r_vec->irq_vector = entry->vector; 2083 2084 if (nn->dp.netdev) { 2085 r_vec->handler = nfp_net_irq_rxtx; 2086 } else { 2087 r_vec->handler = nfp_ctrl_irq_rxtx; 2088 2089 __skb_queue_head_init(&r_vec->queue); 2090 spin_lock_init(&r_vec->lock); 2091 tasklet_init(&r_vec->tasklet, nfp_ctrl_poll, 2092 (unsigned long)r_vec); 2093 tasklet_disable(&r_vec->tasklet); 2094 } 2095 2096 cpumask_set_cpu(r, &r_vec->affinity_mask); 2097 } 2098 } 2099 2100 /** 2101 * nfp_net_tx_ring_free() - Free resources allocated to a TX ring 2102 * @tx_ring: TX ring to free 2103 */ 2104 static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) 2105 { 2106 struct nfp_net_r_vector *r_vec = tx_ring->r_vec; 2107 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; 2108 2109 kfree(tx_ring->txbufs); 2110 2111 if (tx_ring->txds) 2112 dma_free_coherent(dp->dev, tx_ring->size, 2113 tx_ring->txds, tx_ring->dma); 2114 2115 tx_ring->cnt = 0; 2116 tx_ring->txbufs = NULL; 2117 tx_ring->txds = NULL; 2118 tx_ring->dma = 0; 2119 tx_ring->size = 0; 2120 } 2121 2122 /** 2123 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring 2124 * @dp: NFP Net data path struct 2125 * @tx_ring: TX Ring structure to allocate 2126 * 2127 * Return: 0 on success, negative errno otherwise. 2128 */ 2129 static int 2130 nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring) 2131 { 2132 struct nfp_net_r_vector *r_vec = tx_ring->r_vec; 2133 int sz; 2134 2135 tx_ring->cnt = dp->txd_cnt; 2136 2137 tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt; 2138 tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, 2139 &tx_ring->dma, GFP_KERNEL); 2140 if (!tx_ring->txds) 2141 goto err_alloc; 2142 2143 sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt; 2144 tx_ring->txbufs = kzalloc(sz, GFP_KERNEL); 2145 if (!tx_ring->txbufs) 2146 goto err_alloc; 2147 2148 if (!tx_ring->is_xdp && dp->netdev) 2149 netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask, 2150 tx_ring->idx); 2151 2152 return 0; 2153 2154 err_alloc: 2155 nfp_net_tx_ring_free(tx_ring); 2156 return -ENOMEM; 2157 } 2158 2159 static void 2160 nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp, 2161 struct nfp_net_tx_ring *tx_ring) 2162 { 2163 unsigned int i; 2164 2165 if (!tx_ring->is_xdp) 2166 return; 2167 2168 for (i = 0; i < tx_ring->cnt; i++) { 2169 if (!tx_ring->txbufs[i].frag) 2170 return; 2171 2172 nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr); 2173 __free_page(virt_to_page(tx_ring->txbufs[i].frag)); 2174 } 2175 } 2176 2177 static int 2178 nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp, 2179 struct nfp_net_tx_ring *tx_ring) 2180 { 2181 struct nfp_net_tx_buf *txbufs = tx_ring->txbufs; 2182 unsigned int i; 2183 2184 if (!tx_ring->is_xdp) 2185 return 0; 2186 2187 for (i = 0; i < tx_ring->cnt; i++) { 2188 txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr); 2189 if (!txbufs[i].frag) { 2190 nfp_net_tx_ring_bufs_free(dp, tx_ring); 2191 return -ENOMEM; 2192 } 2193 } 2194 2195 return 0; 2196 } 2197 2198 static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) 2199 { 
2200 unsigned int r; 2201 2202 dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings), 2203 GFP_KERNEL); 2204 if (!dp->tx_rings) 2205 return -ENOMEM; 2206 2207 for (r = 0; r < dp->num_tx_rings; r++) { 2208 int bias = 0; 2209 2210 if (r >= dp->num_stack_tx_rings) 2211 bias = dp->num_stack_tx_rings; 2212 2213 nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias], 2214 r, bias); 2215 2216 if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r])) 2217 goto err_free_prev; 2218 2219 if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r])) 2220 goto err_free_ring; 2221 } 2222 2223 return 0; 2224 2225 err_free_prev: 2226 while (r--) { 2227 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); 2228 err_free_ring: 2229 nfp_net_tx_ring_free(&dp->tx_rings[r]); 2230 } 2231 kfree(dp->tx_rings); 2232 return -ENOMEM; 2233 } 2234 2235 static void nfp_net_tx_rings_free(struct nfp_net_dp *dp) 2236 { 2237 unsigned int r; 2238 2239 for (r = 0; r < dp->num_tx_rings; r++) { 2240 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]); 2241 nfp_net_tx_ring_free(&dp->tx_rings[r]); 2242 } 2243 2244 kfree(dp->tx_rings); 2245 } 2246 2247 /** 2248 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring 2249 * @rx_ring: RX ring to free 2250 */ 2251 static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) 2252 { 2253 struct nfp_net_r_vector *r_vec = rx_ring->r_vec; 2254 struct nfp_net_dp *dp = &r_vec->nfp_net->dp; 2255 2256 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); 2257 kfree(rx_ring->rxbufs); 2258 2259 if (rx_ring->rxds) 2260 dma_free_coherent(dp->dev, rx_ring->size, 2261 rx_ring->rxds, rx_ring->dma); 2262 2263 rx_ring->cnt = 0; 2264 rx_ring->rxbufs = NULL; 2265 rx_ring->rxds = NULL; 2266 rx_ring->dma = 0; 2267 rx_ring->size = 0; 2268 } 2269 2270 /** 2271 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring 2272 * @dp: NFP Net data path struct 2273 * @rx_ring: RX ring to allocate 2274 * 2275 * Return: 0 on success, negative errno otherwise. 
2276 */ 2277 static int 2278 nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) 2279 { 2280 int sz, err; 2281 2282 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, rx_ring->idx); 2283 if (err < 0) 2284 return err; 2285 2286 rx_ring->cnt = dp->rxd_cnt; 2287 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; 2288 rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, 2289 &rx_ring->dma, GFP_KERNEL); 2290 if (!rx_ring->rxds) 2291 goto err_alloc; 2292 2293 sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt; 2294 rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL); 2295 if (!rx_ring->rxbufs) 2296 goto err_alloc; 2297 2298 return 0; 2299 2300 err_alloc: 2301 nfp_net_rx_ring_free(rx_ring); 2302 return -ENOMEM; 2303 } 2304 2305 static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp) 2306 { 2307 unsigned int r; 2308 2309 dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings), 2310 GFP_KERNEL); 2311 if (!dp->rx_rings) 2312 return -ENOMEM; 2313 2314 for (r = 0; r < dp->num_rx_rings; r++) { 2315 nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r); 2316 2317 if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r])) 2318 goto err_free_prev; 2319 2320 if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r])) 2321 goto err_free_ring; 2322 } 2323 2324 return 0; 2325 2326 err_free_prev: 2327 while (r--) { 2328 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); 2329 err_free_ring: 2330 nfp_net_rx_ring_free(&dp->rx_rings[r]); 2331 } 2332 kfree(dp->rx_rings); 2333 return -ENOMEM; 2334 } 2335 2336 static void nfp_net_rx_rings_free(struct nfp_net_dp *dp) 2337 { 2338 unsigned int r; 2339 2340 for (r = 0; r < dp->num_rx_rings; r++) { 2341 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]); 2342 nfp_net_rx_ring_free(&dp->rx_rings[r]); 2343 } 2344 2345 kfree(dp->rx_rings); 2346 } 2347 2348 static void 2349 nfp_net_vector_assign_rings(struct nfp_net_dp *dp, 2350 struct nfp_net_r_vector *r_vec, int idx) 2351 { 2352 r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL; 2353 r_vec->tx_ring = 2354 idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL; 2355 2356 r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ? 
2357 &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL; 2358 } 2359 2360 static int 2361 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, 2362 int idx) 2363 { 2364 int err; 2365 2366 /* Setup NAPI */ 2367 if (nn->dp.netdev) 2368 netif_napi_add(nn->dp.netdev, &r_vec->napi, 2369 nfp_net_poll, NAPI_POLL_WEIGHT); 2370 else 2371 tasklet_enable(&r_vec->tasklet); 2372 2373 snprintf(r_vec->name, sizeof(r_vec->name), 2374 "%s-rxtx-%d", nfp_net_name(nn), idx); 2375 err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name, 2376 r_vec); 2377 if (err) { 2378 if (nn->dp.netdev) 2379 netif_napi_del(&r_vec->napi); 2380 else 2381 tasklet_disable(&r_vec->tasklet); 2382 2383 nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector); 2384 return err; 2385 } 2386 disable_irq(r_vec->irq_vector); 2387 2388 irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask); 2389 2390 nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector, 2391 r_vec->irq_entry); 2392 2393 return 0; 2394 } 2395 2396 static void 2397 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) 2398 { 2399 irq_set_affinity_hint(r_vec->irq_vector, NULL); 2400 if (nn->dp.netdev) 2401 netif_napi_del(&r_vec->napi); 2402 else 2403 tasklet_disable(&r_vec->tasklet); 2404 2405 free_irq(r_vec->irq_vector, r_vec); 2406 } 2407 2408 /** 2409 * nfp_net_rss_write_itbl() - Write RSS indirection table to device 2410 * @nn: NFP Net device to reconfigure 2411 */ 2412 void nfp_net_rss_write_itbl(struct nfp_net *nn) 2413 { 2414 int i; 2415 2416 for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4) 2417 nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i, 2418 get_unaligned_le32(nn->rss_itbl + i)); 2419 } 2420 2421 /** 2422 * nfp_net_rss_write_key() - Write RSS hash key to device 2423 * @nn: NFP Net device to reconfigure 2424 */ 2425 void nfp_net_rss_write_key(struct nfp_net *nn) 2426 { 2427 int i; 2428 2429 for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4) 2430 nn_writel(nn, NFP_NET_CFG_RSS_KEY + i, 2431 get_unaligned_le32(nn->rss_key + i)); 2432 } 2433 2434 /** 2435 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW 2436 * @nn: NFP Net device to reconfigure 2437 */ 2438 void nfp_net_coalesce_write_cfg(struct nfp_net *nn) 2439 { 2440 u8 i; 2441 u32 factor; 2442 u32 value; 2443 2444 /* Compute factor used to convert coalesce '_usecs' parameters to 2445 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp 2446 * count. 2447 */ 2448 factor = nn->me_freq_mhz / 16; 2449 2450 /* copy RX interrupt coalesce parameters */ 2451 value = (nn->rx_coalesce_max_frames << 16) | 2452 (factor * nn->rx_coalesce_usecs); 2453 for (i = 0; i < nn->dp.num_rx_rings; i++) 2454 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value); 2455 2456 /* copy TX interrupt coalesce parameters */ 2457 value = (nn->tx_coalesce_max_frames << 16) | 2458 (factor * nn->tx_coalesce_usecs); 2459 for (i = 0; i < nn->dp.num_tx_rings; i++) 2460 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value); 2461 } 2462 2463 /** 2464 * nfp_net_write_mac_addr() - Write mac address to the device control BAR 2465 * @nn: NFP Net device to reconfigure 2466 * @addr: MAC address to write 2467 * 2468 * Writes the MAC address from the netdev to the device control BAR. Does not 2469 * perform the required reconfig. We do a bit of byte swapping dance because 2470 * firmware is LE. 
2471 */ 2472 static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr) 2473 { 2474 nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr)); 2475 nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4)); 2476 } 2477 2478 static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx) 2479 { 2480 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0); 2481 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0); 2482 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0); 2483 2484 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0); 2485 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0); 2486 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0); 2487 } 2488 2489 /** 2490 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP 2491 * @nn: NFP Net device to reconfigure 2492 */ 2493 static void nfp_net_clear_config_and_disable(struct nfp_net *nn) 2494 { 2495 u32 new_ctrl, update; 2496 unsigned int r; 2497 int err; 2498 2499 new_ctrl = nn->dp.ctrl; 2500 new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE; 2501 update = NFP_NET_CFG_UPDATE_GEN; 2502 update |= NFP_NET_CFG_UPDATE_MSIX; 2503 update |= NFP_NET_CFG_UPDATE_RING; 2504 2505 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG) 2506 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG; 2507 2508 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0); 2509 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0); 2510 2511 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); 2512 err = nfp_net_reconfig(nn, update); 2513 if (err) 2514 nn_err(nn, "Could not disable device: %d\n", err); 2515 2516 for (r = 0; r < nn->dp.num_rx_rings; r++) 2517 nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]); 2518 for (r = 0; r < nn->dp.num_tx_rings; r++) 2519 nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]); 2520 for (r = 0; r < nn->dp.num_r_vecs; r++) 2521 nfp_net_vec_clear_ring_data(nn, r); 2522 2523 nn->dp.ctrl = new_ctrl; 2524 } 2525 2526 static void 2527 nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn, 2528 struct nfp_net_rx_ring *rx_ring, unsigned int idx) 2529 { 2530 /* Write the DMA address, size and MSI-X info to the device */ 2531 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma); 2532 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt)); 2533 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry); 2534 } 2535 2536 static void 2537 nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn, 2538 struct nfp_net_tx_ring *tx_ring, unsigned int idx) 2539 { 2540 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma); 2541 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt)); 2542 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry); 2543 } 2544 2545 /** 2546 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP 2547 * @nn: NFP Net device to reconfigure 2548 */ 2549 static int nfp_net_set_config_and_enable(struct nfp_net *nn) 2550 { 2551 u32 bufsz, new_ctrl, update = 0; 2552 unsigned int r; 2553 int err; 2554 2555 new_ctrl = nn->dp.ctrl; 2556 2557 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) { 2558 nfp_net_rss_write_key(nn); 2559 nfp_net_rss_write_itbl(nn); 2560 nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg); 2561 update |= NFP_NET_CFG_UPDATE_RSS; 2562 } 2563 2564 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) { 2565 nfp_net_coalesce_write_cfg(nn); 2566 update |= NFP_NET_CFG_UPDATE_IRQMOD; 2567 } 2568 2569 for (r = 0; r < nn->dp.num_tx_rings; r++) 2570 nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r); 2571 for (r = 0; r < nn->dp.num_rx_rings; r++) 2572 nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r); 2573 2574 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 
? 2575 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1); 2576 2577 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ? 2578 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1); 2579 2580 if (nn->dp.netdev) 2581 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr); 2582 2583 nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu); 2584 2585 bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA; 2586 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz); 2587 2588 /* Enable device */ 2589 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; 2590 update |= NFP_NET_CFG_UPDATE_GEN; 2591 update |= NFP_NET_CFG_UPDATE_MSIX; 2592 update |= NFP_NET_CFG_UPDATE_RING; 2593 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG) 2594 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG; 2595 2596 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); 2597 err = nfp_net_reconfig(nn, update); 2598 if (err) { 2599 nfp_net_clear_config_and_disable(nn); 2600 return err; 2601 } 2602 2603 nn->dp.ctrl = new_ctrl; 2604 2605 for (r = 0; r < nn->dp.num_rx_rings; r++) 2606 nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]); 2607 2608 /* Since reconfiguration requests while NFP is down are ignored we 2609 * have to wipe the entire VXLAN configuration and reinitialize it. 2610 */ 2611 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) { 2612 memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); 2613 memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); 2614 udp_tunnel_get_rx_info(nn->dp.netdev); 2615 } 2616 2617 return 0; 2618 } 2619 2620 /** 2621 * nfp_net_close_stack() - Quiesce the stack (part of close) 2622 * @nn: NFP Net device to reconfigure 2623 */ 2624 static void nfp_net_close_stack(struct nfp_net *nn) 2625 { 2626 unsigned int r; 2627 2628 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); 2629 netif_carrier_off(nn->dp.netdev); 2630 nn->link_up = false; 2631 2632 for (r = 0; r < nn->dp.num_r_vecs; r++) { 2633 disable_irq(nn->r_vecs[r].irq_vector); 2634 napi_disable(&nn->r_vecs[r].napi); 2635 } 2636 2637 netif_tx_disable(nn->dp.netdev); 2638 } 2639 2640 /** 2641 * nfp_net_close_free_all() - Free all runtime resources 2642 * @nn: NFP Net device to reconfigure 2643 */ 2644 static void nfp_net_close_free_all(struct nfp_net *nn) 2645 { 2646 unsigned int r; 2647 2648 nfp_net_tx_rings_free(&nn->dp); 2649 nfp_net_rx_rings_free(&nn->dp); 2650 2651 for (r = 0; r < nn->dp.num_r_vecs; r++) 2652 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); 2653 2654 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); 2655 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); 2656 } 2657 2658 /** 2659 * nfp_net_netdev_close() - Called when the device is downed 2660 * @netdev: netdev structure 2661 */ 2662 static int nfp_net_netdev_close(struct net_device *netdev) 2663 { 2664 struct nfp_net *nn = netdev_priv(netdev); 2665 2666 /* Step 1: Disable RX and TX rings from the Linux kernel perspective 2667 */ 2668 nfp_net_close_stack(nn); 2669 2670 /* Step 2: Tell NFP 2671 */ 2672 nfp_net_clear_config_and_disable(nn); 2673 nfp_port_configure(netdev, false); 2674 2675 /* Step 3: Free resources 2676 */ 2677 nfp_net_close_free_all(nn); 2678 2679 nn_dbg(nn, "%s down", netdev->name); 2680 return 0; 2681 } 2682 2683 void nfp_ctrl_close(struct nfp_net *nn) 2684 { 2685 int r; 2686 2687 rtnl_lock(); 2688 2689 for (r = 0; r < nn->dp.num_r_vecs; r++) { 2690 disable_irq(nn->r_vecs[r].irq_vector); 2691 tasklet_disable(&nn->r_vecs[r].tasklet); 2692 } 2693 2694 nfp_net_clear_config_and_disable(nn); 2695 2696 nfp_net_close_free_all(nn); 2697 2698 rtnl_unlock(); 2699 
} 2700 2701 /** 2702 * nfp_net_open_stack() - Start the device from stack's perspective 2703 * @nn: NFP Net device to reconfigure 2704 */ 2705 static void nfp_net_open_stack(struct nfp_net *nn) 2706 { 2707 unsigned int r; 2708 2709 for (r = 0; r < nn->dp.num_r_vecs; r++) { 2710 napi_enable(&nn->r_vecs[r].napi); 2711 enable_irq(nn->r_vecs[r].irq_vector); 2712 } 2713 2714 netif_tx_wake_all_queues(nn->dp.netdev); 2715 2716 enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); 2717 nfp_net_read_link_status(nn); 2718 } 2719 2720 static int nfp_net_open_alloc_all(struct nfp_net *nn) 2721 { 2722 int err, r; 2723 2724 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn", 2725 nn->exn_name, sizeof(nn->exn_name), 2726 NFP_NET_IRQ_EXN_IDX, nn->exn_handler); 2727 if (err) 2728 return err; 2729 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc", 2730 nn->lsc_name, sizeof(nn->lsc_name), 2731 NFP_NET_IRQ_LSC_IDX, nn->lsc_handler); 2732 if (err) 2733 goto err_free_exn; 2734 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); 2735 2736 for (r = 0; r < nn->dp.num_r_vecs; r++) { 2737 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); 2738 if (err) 2739 goto err_cleanup_vec_p; 2740 } 2741 2742 err = nfp_net_rx_rings_prepare(nn, &nn->dp); 2743 if (err) 2744 goto err_cleanup_vec; 2745 2746 err = nfp_net_tx_rings_prepare(nn, &nn->dp); 2747 if (err) 2748 goto err_free_rx_rings; 2749 2750 for (r = 0; r < nn->max_r_vecs; r++) 2751 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); 2752 2753 return 0; 2754 2755 err_free_rx_rings: 2756 nfp_net_rx_rings_free(&nn->dp); 2757 err_cleanup_vec: 2758 r = nn->dp.num_r_vecs; 2759 err_cleanup_vec_p: 2760 while (r--) 2761 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); 2762 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); 2763 err_free_exn: 2764 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); 2765 return err; 2766 } 2767 2768 static int nfp_net_netdev_open(struct net_device *netdev) 2769 { 2770 struct nfp_net *nn = netdev_priv(netdev); 2771 int err; 2772 2773 /* Step 1: Allocate resources for rings and the like 2774 * - Request interrupts 2775 * - Allocate RX and TX ring resources 2776 * - Setup initial RSS table 2777 */ 2778 err = nfp_net_open_alloc_all(nn); 2779 if (err) 2780 return err; 2781 2782 err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings); 2783 if (err) 2784 goto err_free_all; 2785 2786 err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings); 2787 if (err) 2788 goto err_free_all; 2789 2790 /* Step 2: Configure the NFP 2791 * - Ifup the physical interface if it exists 2792 * - Enable rings from 0 to tx_rings/rx_rings - 1. 
2793 * - Write MAC address (in case it changed) 2794 * - Set the MTU 2795 * - Set the Freelist buffer size 2796 * - Enable the FW 2797 */ 2798 err = nfp_port_configure(netdev, true); 2799 if (err) 2800 goto err_free_all; 2801 2802 err = nfp_net_set_config_and_enable(nn); 2803 if (err) 2804 goto err_port_disable; 2805 2806 /* Step 3: Enable for kernel 2807 * - put some freelist descriptors on each RX ring 2808 * - enable NAPI on each ring 2809 * - enable all TX queues 2810 * - set link state 2811 */ 2812 nfp_net_open_stack(nn); 2813 2814 return 0; 2815 2816 err_port_disable: 2817 nfp_port_configure(netdev, false); 2818 err_free_all: 2819 nfp_net_close_free_all(nn); 2820 return err; 2821 } 2822 2823 int nfp_ctrl_open(struct nfp_net *nn) 2824 { 2825 int err, r; 2826 2827 /* ring dumping depends on vNICs being opened/closed under rtnl */ 2828 rtnl_lock(); 2829 2830 err = nfp_net_open_alloc_all(nn); 2831 if (err) 2832 goto err_unlock; 2833 2834 err = nfp_net_set_config_and_enable(nn); 2835 if (err) 2836 goto err_free_all; 2837 2838 for (r = 0; r < nn->dp.num_r_vecs; r++) 2839 enable_irq(nn->r_vecs[r].irq_vector); 2840 2841 rtnl_unlock(); 2842 2843 return 0; 2844 2845 err_free_all: 2846 nfp_net_close_free_all(nn); 2847 err_unlock: 2848 rtnl_unlock(); 2849 return err; 2850 } 2851 2852 static void nfp_net_set_rx_mode(struct net_device *netdev) 2853 { 2854 struct nfp_net *nn = netdev_priv(netdev); 2855 u32 new_ctrl; 2856 2857 new_ctrl = nn->dp.ctrl; 2858 2859 if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI) 2860 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC; 2861 else 2862 new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC; 2863 2864 if (netdev->flags & IFF_PROMISC) { 2865 if (nn->cap & NFP_NET_CFG_CTRL_PROMISC) 2866 new_ctrl |= NFP_NET_CFG_CTRL_PROMISC; 2867 else 2868 nn_warn(nn, "FW does not support promiscuous mode\n"); 2869 } else { 2870 new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC; 2871 } 2872 2873 if (new_ctrl == nn->dp.ctrl) 2874 return; 2875 2876 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); 2877 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN); 2878 2879 nn->dp.ctrl = new_ctrl; 2880 } 2881 2882 static void nfp_net_rss_init_itbl(struct nfp_net *nn) 2883 { 2884 int i; 2885 2886 for (i = 0; i < sizeof(nn->rss_itbl); i++) 2887 nn->rss_itbl[i] = 2888 ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings); 2889 } 2890 2891 static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp) 2892 { 2893 struct nfp_net_dp new_dp = *dp; 2894 2895 *dp = nn->dp; 2896 nn->dp = new_dp; 2897 2898 nn->dp.netdev->mtu = new_dp.mtu; 2899 2900 if (!netif_is_rxfh_configured(nn->dp.netdev)) 2901 nfp_net_rss_init_itbl(nn); 2902 } 2903 2904 static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp) 2905 { 2906 unsigned int r; 2907 int err; 2908 2909 nfp_net_dp_swap(nn, dp); 2910 2911 for (r = 0; r < nn->max_r_vecs; r++) 2912 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); 2913 2914 err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings); 2915 if (err) 2916 return err; 2917 2918 if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) { 2919 err = netif_set_real_num_tx_queues(nn->dp.netdev, 2920 nn->dp.num_stack_tx_rings); 2921 if (err) 2922 return err; 2923 } 2924 2925 return nfp_net_set_config_and_enable(nn); 2926 } 2927 2928 struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn) 2929 { 2930 struct nfp_net_dp *new; 2931 2932 new = kmalloc(sizeof(*new), GFP_KERNEL); 2933 if (!new) 2934 return NULL; 2935 2936 *new = nn->dp; 2937 2938 /* Clear things which need to be 
recomputed */ 2939 new->fl_bufsz = 0; 2940 new->tx_rings = NULL; 2941 new->rx_rings = NULL; 2942 new->num_r_vecs = 0; 2943 new->num_stack_tx_rings = 0; 2944 2945 return new; 2946 } 2947 2948 static int 2949 nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp, 2950 struct netlink_ext_ack *extack) 2951 { 2952 /* XDP-enabled tests */ 2953 if (!dp->xdp_prog) 2954 return 0; 2955 if (dp->fl_bufsz > PAGE_SIZE) { 2956 NL_SET_ERR_MSG_MOD(extack, "MTU too large w/ XDP enabled"); 2957 return -EINVAL; 2958 } 2959 if (dp->num_tx_rings > nn->max_tx_rings) { 2960 NL_SET_ERR_MSG_MOD(extack, "Insufficient number of TX rings w/ XDP enabled"); 2961 return -EINVAL; 2962 } 2963 2964 return 0; 2965 } 2966 2967 int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp, 2968 struct netlink_ext_ack *extack) 2969 { 2970 int r, err; 2971 2972 dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp); 2973 2974 dp->num_stack_tx_rings = dp->num_tx_rings; 2975 if (dp->xdp_prog) 2976 dp->num_stack_tx_rings -= dp->num_rx_rings; 2977 2978 dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings); 2979 2980 err = nfp_net_check_config(nn, dp, extack); 2981 if (err) 2982 goto exit_free_dp; 2983 2984 if (!netif_running(dp->netdev)) { 2985 nfp_net_dp_swap(nn, dp); 2986 err = 0; 2987 goto exit_free_dp; 2988 } 2989 2990 /* Prepare new rings */ 2991 for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) { 2992 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); 2993 if (err) { 2994 dp->num_r_vecs = r; 2995 goto err_cleanup_vecs; 2996 } 2997 } 2998 2999 err = nfp_net_rx_rings_prepare(nn, dp); 3000 if (err) 3001 goto err_cleanup_vecs; 3002 3003 err = nfp_net_tx_rings_prepare(nn, dp); 3004 if (err) 3005 goto err_free_rx; 3006 3007 /* Stop device, swap in new rings, try to start the firmware */ 3008 nfp_net_close_stack(nn); 3009 nfp_net_clear_config_and_disable(nn); 3010 3011 err = nfp_net_dp_swap_enable(nn, dp); 3012 if (err) { 3013 int err2; 3014 3015 nfp_net_clear_config_and_disable(nn); 3016 3017 /* Try with old configuration and old rings */ 3018 err2 = nfp_net_dp_swap_enable(nn, dp); 3019 if (err2) 3020 nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n", 3021 err, err2); 3022 } 3023 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--) 3024 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); 3025 3026 nfp_net_rx_rings_free(dp); 3027 nfp_net_tx_rings_free(dp); 3028 3029 nfp_net_open_stack(nn); 3030 exit_free_dp: 3031 kfree(dp); 3032 3033 return err; 3034 3035 err_free_rx: 3036 nfp_net_rx_rings_free(dp); 3037 err_cleanup_vecs: 3038 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--) 3039 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); 3040 kfree(dp); 3041 return err; 3042 } 3043 3044 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) 3045 { 3046 struct nfp_net *nn = netdev_priv(netdev); 3047 struct nfp_net_dp *dp; 3048 3049 dp = nfp_net_clone_dp(nn); 3050 if (!dp) 3051 return -ENOMEM; 3052 3053 dp->mtu = new_mtu; 3054 3055 return nfp_net_ring_reconfig(nn, dp, NULL); 3056 } 3057 3058 static int 3059 nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) 3060 { 3061 struct nfp_net *nn = netdev_priv(netdev); 3062 3063 /* Priority tagged packets with vlan id 0 are processed by the 3064 * NFP as untagged packets 3065 */ 3066 if (!vid) 3067 return 0; 3068 3069 nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid); 3070 nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q); 3071 3072 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD); 3073 } 3074 3075 
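/* Editorial sketch, not part of the upstream driver: the CTAG filter add and
 * kill callbacks above and below differ only in the mailbox command passed to
 * nfp_net_reconfig_mbox(), so they could share a small helper along these
 * lines.  The register offsets and mailbox API are the ones already used
 * above; only the helper name nfp_net_vlan_filter_cmd() is hypothetical.
 */
static int __maybe_unused
nfp_net_vlan_filter_cmd(struct nfp_net *nn, u16 vid, u32 mbox_cmd)
{
	/* Priority tagged packets with vlan id 0 are processed by the
	 * NFP as untagged packets
	 */
	if (!vid)
		return 0;

	nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
	nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);

	return nfp_net_reconfig_mbox(nn, mbox_cmd);
}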
static int 3076 nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) 3077 { 3078 struct nfp_net *nn = netdev_priv(netdev); 3079 3080 /* Priority tagged packets with vlan id 0 are processed by the 3081 * NFP as untagged packets 3082 */ 3083 if (!vid) 3084 return 0; 3085 3086 nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid); 3087 nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q); 3088 3089 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); 3090 } 3091 3092 static void nfp_net_stat64(struct net_device *netdev, 3093 struct rtnl_link_stats64 *stats) 3094 { 3095 struct nfp_net *nn = netdev_priv(netdev); 3096 int r; 3097 3098 for (r = 0; r < nn->dp.num_r_vecs; r++) { 3099 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r]; 3100 u64 data[3]; 3101 unsigned int start; 3102 3103 do { 3104 start = u64_stats_fetch_begin(&r_vec->rx_sync); 3105 data[0] = r_vec->rx_pkts; 3106 data[1] = r_vec->rx_bytes; 3107 data[2] = r_vec->rx_drops; 3108 } while (u64_stats_fetch_retry(&r_vec->rx_sync, start)); 3109 stats->rx_packets += data[0]; 3110 stats->rx_bytes += data[1]; 3111 stats->rx_dropped += data[2]; 3112 3113 do { 3114 start = u64_stats_fetch_begin(&r_vec->tx_sync); 3115 data[0] = r_vec->tx_pkts; 3116 data[1] = r_vec->tx_bytes; 3117 data[2] = r_vec->tx_errors; 3118 } while (u64_stats_fetch_retry(&r_vec->tx_sync, start)); 3119 stats->tx_packets += data[0]; 3120 stats->tx_bytes += data[1]; 3121 stats->tx_errors += data[2]; 3122 } 3123 } 3124 3125 static int nfp_net_set_features(struct net_device *netdev, 3126 netdev_features_t features) 3127 { 3128 netdev_features_t changed = netdev->features ^ features; 3129 struct nfp_net *nn = netdev_priv(netdev); 3130 u32 new_ctrl; 3131 int err; 3132 3133 /* Assume this is not called with features we have not advertised */ 3134 3135 new_ctrl = nn->dp.ctrl; 3136 3137 if (changed & NETIF_F_RXCSUM) { 3138 if (features & NETIF_F_RXCSUM) 3139 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY; 3140 else 3141 new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY; 3142 } 3143 3144 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 3145 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) 3146 new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM; 3147 else 3148 new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM; 3149 } 3150 3151 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { 3152 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) 3153 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?: 3154 NFP_NET_CFG_CTRL_LSO; 3155 else 3156 new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY; 3157 } 3158 3159 if (changed & NETIF_F_HW_VLAN_CTAG_RX) { 3160 if (features & NETIF_F_HW_VLAN_CTAG_RX) 3161 new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN; 3162 else 3163 new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN; 3164 } 3165 3166 if (changed & NETIF_F_HW_VLAN_CTAG_TX) { 3167 if (features & NETIF_F_HW_VLAN_CTAG_TX) 3168 new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN; 3169 else 3170 new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN; 3171 } 3172 3173 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { 3174 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) 3175 new_ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER; 3176 else 3177 new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER; 3178 } 3179 3180 if (changed & NETIF_F_SG) { 3181 if (features & NETIF_F_SG) 3182 new_ctrl |= NFP_NET_CFG_CTRL_GATHER; 3183 else 3184 new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER; 3185 } 3186 3187 if (changed & NETIF_F_HW_TC && nfp_app_tc_busy(nn->app, nn)) { 3188 nn_err(nn, "Cannot disable HW TC offload while in use\n"); 3189 return -EBUSY; 3190 } 3191 3192 nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n", 3193 
netdev->features, features, changed); 3194 3195 if (new_ctrl == nn->dp.ctrl) 3196 return 0; 3197 3198 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl); 3199 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); 3200 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); 3201 if (err) 3202 return err; 3203 3204 nn->dp.ctrl = new_ctrl; 3205 3206 return 0; 3207 } 3208 3209 static netdev_features_t 3210 nfp_net_features_check(struct sk_buff *skb, struct net_device *dev, 3211 netdev_features_t features) 3212 { 3213 u8 l4_hdr; 3214 3215 /* We can't do TSO over double tagged packets (802.1AD) */ 3216 features &= vlan_features_check(skb, features); 3217 3218 if (!skb->encapsulation) 3219 return features; 3220 3221 /* Ensure that inner L4 header offset fits into TX descriptor field */ 3222 if (skb_is_gso(skb)) { 3223 u32 hdrlen; 3224 3225 hdrlen = skb_inner_transport_header(skb) - skb->data + 3226 inner_tcp_hdrlen(skb); 3227 3228 if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ)) 3229 features &= ~NETIF_F_GSO_MASK; 3230 } 3231 3232 /* VXLAN/GRE check */ 3233 switch (vlan_get_protocol(skb)) { 3234 case htons(ETH_P_IP): 3235 l4_hdr = ip_hdr(skb)->protocol; 3236 break; 3237 case htons(ETH_P_IPV6): 3238 l4_hdr = ipv6_hdr(skb)->nexthdr; 3239 break; 3240 default: 3241 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3242 } 3243 3244 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || 3245 skb->inner_protocol != htons(ETH_P_TEB) || 3246 (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) || 3247 (l4_hdr == IPPROTO_UDP && 3248 (skb_inner_mac_header(skb) - skb_transport_header(skb) != 3249 sizeof(struct udphdr) + sizeof(struct vxlanhdr)))) 3250 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 3251 3252 return features; 3253 } 3254 3255 /** 3256 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW 3257 * @nn: NFP Net device to reconfigure 3258 * @idx: Index into the port table where new port should be written 3259 * @port: UDP port to configure (pass zero to remove VXLAN port) 3260 */ 3261 static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port) 3262 { 3263 int i; 3264 3265 nn->vxlan_ports[idx] = port; 3266 3267 if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN)) 3268 return; 3269 3270 BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1); 3271 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) 3272 nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port), 3273 be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 | 3274 be16_to_cpu(nn->vxlan_ports[i])); 3275 3276 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN); 3277 } 3278 3279 /** 3280 * nfp_net_find_vxlan_idx() - find table entry of the port or a free one 3281 * @nn: NFP Network structure 3282 * @port: UDP port to look for 3283 * 3284 * Return: if the port is already in the table -- its position; 3285 * if the port is not in the table -- free position to use; 3286 * if the table is full -- -ENOSPC.
3287 */ 3288 static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port) 3289 { 3290 int i, free_idx = -ENOSPC; 3291 3292 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) { 3293 if (nn->vxlan_ports[i] == port) 3294 return i; 3295 if (!nn->vxlan_usecnt[i]) 3296 free_idx = i; 3297 } 3298 3299 return free_idx; 3300 } 3301 3302 static void nfp_net_add_vxlan_port(struct net_device *netdev, 3303 struct udp_tunnel_info *ti) 3304 { 3305 struct nfp_net *nn = netdev_priv(netdev); 3306 int idx; 3307 3308 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 3309 return; 3310 3311 idx = nfp_net_find_vxlan_idx(nn, ti->port); 3312 if (idx == -ENOSPC) 3313 return; 3314 3315 if (!nn->vxlan_usecnt[idx]++) 3316 nfp_net_set_vxlan_port(nn, idx, ti->port); 3317 } 3318 3319 static void nfp_net_del_vxlan_port(struct net_device *netdev, 3320 struct udp_tunnel_info *ti) 3321 { 3322 struct nfp_net *nn = netdev_priv(netdev); 3323 int idx; 3324 3325 if (ti->type != UDP_TUNNEL_TYPE_VXLAN) 3326 return; 3327 3328 idx = nfp_net_find_vxlan_idx(nn, ti->port); 3329 if (idx == -ENOSPC || !nn->vxlan_usecnt[idx]) 3330 return; 3331 3332 if (!--nn->vxlan_usecnt[idx]) 3333 nfp_net_set_vxlan_port(nn, idx, 0); 3334 } 3335 3336 static int 3337 nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog, 3338 struct netlink_ext_ack *extack) 3339 { 3340 struct nfp_net_dp *dp; 3341 3342 if (!prog == !nn->dp.xdp_prog) { 3343 WRITE_ONCE(nn->dp.xdp_prog, prog); 3344 return 0; 3345 } 3346 3347 dp = nfp_net_clone_dp(nn); 3348 if (!dp) 3349 return -ENOMEM; 3350 3351 dp->xdp_prog = prog; 3352 dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings; 3353 dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; 3354 dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0; 3355 3356 /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */ 3357 return nfp_net_ring_reconfig(nn, dp, extack); 3358 } 3359 3360 static int 3361 nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags, 3362 struct netlink_ext_ack *extack) 3363 { 3364 struct bpf_prog *drv_prog, *offload_prog; 3365 int err; 3366 3367 if (nn->xdp_prog && (flags ^ nn->xdp_flags) & XDP_FLAGS_MODES) 3368 return -EBUSY; 3369 3370 /* Load both when no flags set to allow easy activation of driver path 3371 * when program is replaced by one which can't be offloaded. 3372 */ 3373 drv_prog = flags & XDP_FLAGS_HW_MODE ? NULL : prog; 3374 offload_prog = flags & XDP_FLAGS_DRV_MODE ? NULL : prog; 3375 3376 err = nfp_net_xdp_setup_drv(nn, drv_prog, extack); 3377 if (err) 3378 return err; 3379 3380 err = nfp_app_xdp_offload(nn->app, nn, offload_prog); 3381 if (err && flags & XDP_FLAGS_HW_MODE) 3382 return err; 3383 3384 if (nn->xdp_prog) 3385 bpf_prog_put(nn->xdp_prog); 3386 nn->xdp_prog = prog; 3387 nn->xdp_flags = flags; 3388 3389 return 0; 3390 } 3391 3392 static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) 3393 { 3394 struct nfp_net *nn = netdev_priv(netdev); 3395 3396 switch (xdp->command) { 3397 case XDP_SETUP_PROG: 3398 case XDP_SETUP_PROG_HW: 3399 return nfp_net_xdp_setup(nn, xdp->prog, xdp->flags, 3400 xdp->extack); 3401 case XDP_QUERY_PROG: 3402 xdp->prog_attached = !!nn->xdp_prog; 3403 if (nn->dp.bpf_offload_xdp) 3404 xdp->prog_attached = XDP_ATTACHED_HW; 3405 xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0; 3406 xdp->prog_flags = nn->xdp_prog ? 
nn->xdp_flags : 0; 3407 return 0; 3408 case BPF_OFFLOAD_VERIFIER_PREP: 3409 return nfp_app_bpf_verifier_prep(nn->app, nn, xdp); 3410 case BPF_OFFLOAD_TRANSLATE: 3411 return nfp_app_bpf_translate(nn->app, nn, 3412 xdp->offload.prog); 3413 case BPF_OFFLOAD_DESTROY: 3414 return nfp_app_bpf_destroy(nn->app, nn, 3415 xdp->offload.prog); 3416 default: 3417 return -EINVAL; 3418 } 3419 } 3420 3421 static int nfp_net_set_mac_address(struct net_device *netdev, void *addr) 3422 { 3423 struct nfp_net *nn = netdev_priv(netdev); 3424 struct sockaddr *saddr = addr; 3425 int err; 3426 3427 err = eth_prepare_mac_addr_change(netdev, addr); 3428 if (err) 3429 return err; 3430 3431 nfp_net_write_mac_addr(nn, saddr->sa_data); 3432 3433 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MACADDR); 3434 if (err) 3435 return err; 3436 3437 eth_commit_mac_addr_change(netdev, addr); 3438 3439 return 0; 3440 } 3441 3442 const struct net_device_ops nfp_net_netdev_ops = { 3443 .ndo_open = nfp_net_netdev_open, 3444 .ndo_stop = nfp_net_netdev_close, 3445 .ndo_start_xmit = nfp_net_tx, 3446 .ndo_get_stats64 = nfp_net_stat64, 3447 .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, 3448 .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, 3449 .ndo_set_vf_mac = nfp_app_set_vf_mac, 3450 .ndo_set_vf_vlan = nfp_app_set_vf_vlan, 3451 .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, 3452 .ndo_get_vf_config = nfp_app_get_vf_config, 3453 .ndo_set_vf_link_state = nfp_app_set_vf_link_state, 3454 .ndo_setup_tc = nfp_port_setup_tc, 3455 .ndo_tx_timeout = nfp_net_tx_timeout, 3456 .ndo_set_rx_mode = nfp_net_set_rx_mode, 3457 .ndo_change_mtu = nfp_net_change_mtu, 3458 .ndo_set_mac_address = nfp_net_set_mac_address, 3459 .ndo_set_features = nfp_net_set_features, 3460 .ndo_features_check = nfp_net_features_check, 3461 .ndo_get_phys_port_name = nfp_port_get_phys_port_name, 3462 .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, 3463 .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, 3464 .ndo_bpf = nfp_net_xdp, 3465 }; 3466 3467 /** 3468 * nfp_net_info() - Print general info about the NIC 3469 * @nn: NFP Net device to reconfigure 3470 */ 3471 void nfp_net_info(struct nfp_net *nn) 3472 { 3473 nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n", 3474 nn->dp.is_vf ? "VF " : "", 3475 nn->dp.num_tx_rings, nn->max_tx_rings, 3476 nn->dp.num_rx_rings, nn->max_rx_rings); 3477 nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n", 3478 nn->fw_ver.resv, nn->fw_ver.class, 3479 nn->fw_ver.major, nn->fw_ver.minor, 3480 nn->max_mtu); 3481 nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", 3482 nn->cap, 3483 nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "", 3484 nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "", 3485 nn->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "", 3486 nn->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "", 3487 nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "", 3488 nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "", 3489 nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "", 3490 nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "", 3491 nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "", 3492 nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "", 3493 nn->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSO2 " : "", 3494 nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "", 3495 nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "", 3496 nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "", 3497 nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "", 3498 nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? 
"AUTOMASK " : "", 3499 nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "", 3500 nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "", 3501 nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "", 3502 nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ? 3503 "RXCSUM_COMPLETE " : "", 3504 nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "", 3505 nfp_app_extra_cap(nn->app, nn)); 3506 } 3507 3508 /** 3509 * nfp_net_alloc() - Allocate netdev and related structure 3510 * @pdev: PCI device 3511 * @needs_netdev: Whether to allocate a netdev for this vNIC 3512 * @max_tx_rings: Maximum number of TX rings supported by device 3513 * @max_rx_rings: Maximum number of RX rings supported by device 3514 * 3515 * This function allocates a netdev device and fills in the initial 3516 * part of the @struct nfp_net structure. In case of control device 3517 * nfp_net structure is allocated without the netdev. 3518 * 3519 * Return: NFP Net device structure, or ERR_PTR on error. 3520 */ 3521 struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, 3522 unsigned int max_tx_rings, 3523 unsigned int max_rx_rings) 3524 { 3525 struct nfp_net *nn; 3526 3527 if (needs_netdev) { 3528 struct net_device *netdev; 3529 3530 netdev = alloc_etherdev_mqs(sizeof(struct nfp_net), 3531 max_tx_rings, max_rx_rings); 3532 if (!netdev) 3533 return ERR_PTR(-ENOMEM); 3534 3535 SET_NETDEV_DEV(netdev, &pdev->dev); 3536 nn = netdev_priv(netdev); 3537 nn->dp.netdev = netdev; 3538 } else { 3539 nn = vzalloc(sizeof(*nn)); 3540 if (!nn) 3541 return ERR_PTR(-ENOMEM); 3542 } 3543 3544 nn->dp.dev = &pdev->dev; 3545 nn->pdev = pdev; 3546 3547 nn->max_tx_rings = max_tx_rings; 3548 nn->max_rx_rings = max_rx_rings; 3549 3550 nn->dp.num_tx_rings = min_t(unsigned int, 3551 max_tx_rings, num_online_cpus()); 3552 nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings, 3553 netif_get_num_default_rss_queues()); 3554 3555 nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings); 3556 nn->dp.num_r_vecs = min_t(unsigned int, 3557 nn->dp.num_r_vecs, num_online_cpus()); 3558 3559 nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT; 3560 nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; 3561 3562 spin_lock_init(&nn->reconfig_lock); 3563 spin_lock_init(&nn->link_status_lock); 3564 3565 timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0); 3566 3567 return nn; 3568 } 3569 3570 /** 3571 * nfp_net_free() - Undo what @nfp_net_alloc() did 3572 * @nn: NFP Net device to reconfigure 3573 */ 3574 void nfp_net_free(struct nfp_net *nn) 3575 { 3576 if (nn->dp.netdev) 3577 free_netdev(nn->dp.netdev); 3578 else 3579 vfree(nn); 3580 } 3581 3582 /** 3583 * nfp_net_rss_key_sz() - Get current size of the RSS key 3584 * @nn: NFP Net device instance 3585 * 3586 * Return: size of the RSS key for currently selected hash function. 
3587 */ 3588 unsigned int nfp_net_rss_key_sz(struct nfp_net *nn) 3589 { 3590 switch (nn->rss_hfunc) { 3591 case ETH_RSS_HASH_TOP: 3592 return NFP_NET_CFG_RSS_KEY_SZ; 3593 case ETH_RSS_HASH_XOR: 3594 return 0; 3595 case ETH_RSS_HASH_CRC32: 3596 return 4; 3597 } 3598 3599 nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc); 3600 return 0; 3601 } 3602 3603 /** 3604 * nfp_net_rss_init() - Set the initial RSS parameters 3605 * @nn: NFP Net device to reconfigure 3606 */ 3607 static void nfp_net_rss_init(struct nfp_net *nn) 3608 { 3609 unsigned long func_bit, rss_cap_hfunc; 3610 u32 reg; 3611 3612 /* Read the RSS function capability and select first supported func */ 3613 reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP); 3614 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg); 3615 if (!rss_cap_hfunc) 3616 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, 3617 NFP_NET_CFG_RSS_TOEPLITZ); 3618 3619 func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS); 3620 if (func_bit == NFP_NET_CFG_RSS_HFUNCS) { 3621 dev_warn(nn->dp.dev, 3622 "Bad RSS config, defaulting to Toeplitz hash\n"); 3623 func_bit = ETH_RSS_HASH_TOP_BIT; 3624 } 3625 nn->rss_hfunc = 1 << func_bit; 3626 3627 netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn)); 3628 3629 nfp_net_rss_init_itbl(nn); 3630 3631 /* Enable IPv4/IPv6 TCP by default */ 3632 nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP | 3633 NFP_NET_CFG_RSS_IPV6_TCP | 3634 FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) | 3635 NFP_NET_CFG_RSS_MASK; 3636 } 3637 3638 /** 3639 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters 3640 * @nn: NFP Net device to reconfigure 3641 */ 3642 static void nfp_net_irqmod_init(struct nfp_net *nn) 3643 { 3644 nn->rx_coalesce_usecs = 50; 3645 nn->rx_coalesce_max_frames = 64; 3646 nn->tx_coalesce_usecs = 50; 3647 nn->tx_coalesce_max_frames = 64; 3648 } 3649 3650 static void nfp_net_netdev_init(struct nfp_net *nn) 3651 { 3652 struct net_device *netdev = nn->dp.netdev; 3653 3654 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr); 3655 3656 netdev->mtu = nn->dp.mtu; 3657 3658 /* Advertise/enable offloads based on capabilities 3659 * 3660 * Note: netdev->features show the currently enabled features 3661 * and netdev->hw_features advertises which features are 3662 * supported. By default we enable most features. 
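 * TSO is a special case: it is advertised via hw_features but stripped from
 * netdev->features at the end of this function, so it stays off until the
 * user explicitly enables it.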
3663 */ 3664 if (nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) 3665 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 3666 3667 netdev->hw_features = NETIF_F_HIGHDMA; 3668 if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) { 3669 netdev->hw_features |= NETIF_F_RXCSUM; 3670 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY; 3671 } 3672 if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) { 3673 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3674 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM; 3675 } 3676 if (nn->cap & NFP_NET_CFG_CTRL_GATHER) { 3677 netdev->hw_features |= NETIF_F_SG; 3678 nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER; 3679 } 3680 if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) || 3681 nn->cap & NFP_NET_CFG_CTRL_LSO2) { 3682 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 3683 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?: 3684 NFP_NET_CFG_CTRL_LSO; 3685 } 3686 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) 3687 netdev->hw_features |= NETIF_F_RXHASH; 3688 if (nn->cap & NFP_NET_CFG_CTRL_VXLAN && 3689 nn->cap & NFP_NET_CFG_CTRL_NVGRE) { 3690 if (nn->cap & NFP_NET_CFG_CTRL_LSO) 3691 netdev->hw_features |= NETIF_F_GSO_GRE | 3692 NETIF_F_GSO_UDP_TUNNEL; 3693 nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE; 3694 3695 netdev->hw_enc_features = netdev->hw_features; 3696 } 3697 3698 netdev->vlan_features = netdev->hw_features; 3699 3700 if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) { 3701 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; 3702 nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN; 3703 } 3704 if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) { 3705 if (nn->cap & NFP_NET_CFG_CTRL_LSO2) { 3706 nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n"); 3707 } else { 3708 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; 3709 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN; 3710 } 3711 } 3712 if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) { 3713 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3714 nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER; 3715 } 3716 3717 netdev->features = netdev->hw_features; 3718 3719 if (nfp_app_has_tc(nn->app)) 3720 netdev->hw_features |= NETIF_F_HW_TC; 3721 3722 /* Advertise but disable TSO by default. */ 3723 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); 3724 nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY; 3725 3726 /* Finalise the netdev setup */ 3727 netdev->netdev_ops = &nfp_net_netdev_ops; 3728 netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000); 3729 3730 SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops); 3731 3732 /* MTU range: 68 - hw-specific max */ 3733 netdev->min_mtu = ETH_MIN_MTU; 3734 netdev->max_mtu = nn->max_mtu; 3735 3736 netif_carrier_off(netdev); 3737 3738 nfp_net_set_ethtool_ops(netdev); 3739 } 3740 3741 /** 3742 * nfp_net_init() - Initialise/finalise the nfp_net structure 3743 * @nn: NFP Net device structure 3744 * 3745 * Return: 0 on success or negative errno on error. 3746 */ 3747 int nfp_net_init(struct nfp_net *nn) 3748 { 3749 int err; 3750 3751 nn->dp.rx_dma_dir = DMA_FROM_DEVICE; 3752 3753 /* Get some of the read-only fields from the BAR */ 3754 nn->cap = nn_readl(nn, NFP_NET_CFG_CAP); 3755 nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU); 3756 3757 /* ABI 4.x and ctrl vNIC always use chained metadata, in other cases 3758 * we allow use of non-chained metadata if RSS(v1) is the only 3759 * advertised capability requiring metadata. 
3760 */ 3761 nn->dp.chained_metadata_format = nn->fw_ver.major == 4 || 3762 !nn->dp.netdev || 3763 !(nn->cap & NFP_NET_CFG_CTRL_RSS) || 3764 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META; 3765 /* RSS(v1) uses non-chained metadata format, except in ABI 4.x where 3766 * it has the same meaning as RSSv2. 3767 */ 3768 if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4) 3769 nn->cap &= ~NFP_NET_CFG_CTRL_RSS; 3770 3771 /* Determine RX packet/metadata boundary offset */ 3772 if (nn->fw_ver.major >= 2) { 3773 u32 reg; 3774 3775 reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET); 3776 if (reg > NFP_NET_MAX_PREPEND) { 3777 nn_err(nn, "Invalid rx offset: %d\n", reg); 3778 return -EINVAL; 3779 } 3780 nn->dp.rx_offset = reg; 3781 } else { 3782 nn->dp.rx_offset = NFP_NET_RX_OFFSET; 3783 } 3784 3785 /* Set default MTU and Freelist buffer size */ 3786 if (nn->max_mtu < NFP_NET_DEFAULT_MTU) 3787 nn->dp.mtu = nn->max_mtu; 3788 else 3789 nn->dp.mtu = NFP_NET_DEFAULT_MTU; 3790 nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp); 3791 3792 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) { 3793 nfp_net_rss_init(nn); 3794 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?: 3795 NFP_NET_CFG_CTRL_RSS; 3796 } 3797 3798 /* Allow L2 Broadcast and Multicast through by default, if supported */ 3799 if (nn->cap & NFP_NET_CFG_CTRL_L2BC) 3800 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC; 3801 3802 /* Allow IRQ moderation, if supported */ 3803 if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) { 3804 nfp_net_irqmod_init(nn); 3805 nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD; 3806 } 3807 3808 if (nn->dp.netdev) 3809 nfp_net_netdev_init(nn); 3810 3811 /* Stash the re-configuration queue away. First odd queue in TX Bar */ 3812 nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ; 3813 3814 /* Make sure the FW knows the netdev is supposed to be disabled here */ 3815 nn_writel(nn, NFP_NET_CFG_CTRL, 0); 3816 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0); 3817 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0); 3818 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING | 3819 NFP_NET_CFG_UPDATE_GEN); 3820 if (err) 3821 return err; 3822 3823 nfp_net_vecs_init(nn); 3824 3825 if (!nn->dp.netdev) 3826 return 0; 3827 return register_netdev(nn->dp.netdev); 3828 } 3829 3830 /** 3831 * nfp_net_clean() - Undo what nfp_net_init() did. 3832 * @nn: NFP Net device structure 3833 */ 3834 void nfp_net_clean(struct nfp_net *nn) 3835 { 3836 if (!nn->dp.netdev) 3837 return; 3838 3839 unregister_netdev(nn->dp.netdev); 3840 } 3841
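
/* Editorial sketch, not part of the upstream driver: how a hypothetical
 * caller would pair the entry points defined in this file.  The real PF and
 * VF probe paths additionally map the control and queue BARs and allocate
 * MSI-X vectors between nfp_net_alloc() and nfp_net_init(); that setup is
 * elided here and the 64/64 ring limits are placeholders.
 */
static int __maybe_unused nfp_net_example_lifecycle(struct pci_dev *pdev)
{
	struct nfp_net *nn;
	int err;

	/* Allocate a vNIC together with a netdev */
	nn = nfp_net_alloc(pdev, true, 64, 64);
	if (IS_ERR(nn))
		return PTR_ERR(nn);

	/* ... BAR mapping, interrupt vector allocation, etc. goes here ... */

	err = nfp_net_init(nn);		/* read caps, register the netdev */
	if (err) {
		nfp_net_free(nn);
		return err;
	}

	nfp_net_info(nn);		/* log device capabilities */

	/* Tear-down mirrors setup: clean undoes init, free undoes alloc */
	nfp_net_clean(nn);
	nfp_net_free(nn);

	return 0;
}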