1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) 2 3 #include <linux/bpf.h> 4 #include <linux/crash_dump.h> 5 #include <linux/etherdevice.h> 6 #include <linux/ethtool.h> 7 #include <linux/filter.h> 8 #include <linux/idr.h> 9 #include <linux/if_vlan.h> 10 #include <linux/module.h> 11 #include <linux/netdevice.h> 12 #include <linux/pci.h> 13 #include <linux/rtnetlink.h> 14 #include <linux/inetdevice.h> 15 16 #include "funeth.h" 17 #include "funeth_devlink.h" 18 #include "funeth_ktls.h" 19 #include "fun_port.h" 20 #include "fun_queue.h" 21 #include "funeth_txrx.h" 22 23 #define ADMIN_SQ_DEPTH 32 24 #define ADMIN_CQ_DEPTH 64 25 #define ADMIN_RQ_DEPTH 16 26 27 /* Default number of Tx/Rx queues. */ 28 #define FUN_DFLT_QUEUES 16U 29 30 enum { 31 FUN_SERV_RES_CHANGE = FUN_SERV_FIRST_AVAIL, 32 FUN_SERV_DEL_PORTS, 33 }; 34 35 static const struct pci_device_id funeth_id_table[] = { 36 { PCI_VDEVICE(FUNGIBLE, 0x0101) }, 37 { PCI_VDEVICE(FUNGIBLE, 0x0181) }, 38 { 0, } 39 }; 40 41 /* Issue a port write admin command with @n key/value pairs. */ 42 static int fun_port_write_cmds(struct funeth_priv *fp, unsigned int n, 43 const int *keys, const u64 *data) 44 { 45 unsigned int cmd_size, i; 46 union { 47 struct fun_admin_port_req req; 48 struct fun_admin_port_rsp rsp; 49 u8 v[ADMIN_SQE_SIZE]; 50 } cmd; 51 52 cmd_size = offsetof(struct fun_admin_port_req, u.write.write48) + 53 n * sizeof(struct fun_admin_write48_req); 54 if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN) 55 return -EINVAL; 56 57 cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT, 58 cmd_size); 59 cmd.req.u.write = 60 FUN_ADMIN_PORT_WRITE_REQ_INIT(FUN_ADMIN_SUBOP_WRITE, 0, 61 fp->netdev->dev_port); 62 for (i = 0; i < n; i++) 63 cmd.req.u.write.write48[i] = 64 FUN_ADMIN_WRITE48_REQ_INIT(keys[i], data[i]); 65 66 return fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, 67 &cmd.rsp, cmd_size, 0); 68 } 69 70 int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data) 71 { 72 return fun_port_write_cmds(fp, 1, &key, &data); 73 } 74 75 /* Issue a port read admin command with @n key/value pairs. 
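 * The value read for each key is returned in the corresponding entry of @data.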
*/ 76 static int fun_port_read_cmds(struct funeth_priv *fp, unsigned int n, 77 const int *keys, u64 *data) 78 { 79 const struct fun_admin_read48_rsp *r48rsp; 80 unsigned int cmd_size, i; 81 int rc; 82 union { 83 struct fun_admin_port_req req; 84 struct fun_admin_port_rsp rsp; 85 u8 v[ADMIN_SQE_SIZE]; 86 } cmd; 87 88 cmd_size = offsetof(struct fun_admin_port_req, u.read.read48) + 89 n * sizeof(struct fun_admin_read48_req); 90 if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN) 91 return -EINVAL; 92 93 cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT, 94 cmd_size); 95 cmd.req.u.read = 96 FUN_ADMIN_PORT_READ_REQ_INIT(FUN_ADMIN_SUBOP_READ, 0, 97 fp->netdev->dev_port); 98 for (i = 0; i < n; i++) 99 cmd.req.u.read.read48[i] = FUN_ADMIN_READ48_REQ_INIT(keys[i]); 100 101 rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, 102 &cmd.rsp, cmd_size, 0); 103 if (rc) 104 return rc; 105 106 for (r48rsp = cmd.rsp.u.read.read48, i = 0; i < n; i++, r48rsp++) { 107 data[i] = FUN_ADMIN_READ48_RSP_DATA_G(r48rsp->key_to_data); 108 dev_dbg(fp->fdev->dev, 109 "port_read_rsp lport=%u (key_to_data=0x%llx) key=%d data:%lld retval:%lld", 110 fp->lport, r48rsp->key_to_data, keys[i], data[i], 111 FUN_ADMIN_READ48_RSP_RET_G(r48rsp->key_to_data)); 112 } 113 return 0; 114 } 115 116 int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data) 117 { 118 return fun_port_read_cmds(fp, 1, &key, data); 119 } 120 121 static void fun_report_link(struct net_device *netdev) 122 { 123 if (netif_carrier_ok(netdev)) { 124 const struct funeth_priv *fp = netdev_priv(netdev); 125 const char *fec = "", *pause = ""; 126 int speed = fp->link_speed; 127 char unit = 'M'; 128 129 if (fp->link_speed >= SPEED_1000) { 130 speed /= 1000; 131 unit = 'G'; 132 } 133 134 if (fp->active_fec & FUN_PORT_FEC_RS) 135 fec = ", RS-FEC"; 136 else if (fp->active_fec & FUN_PORT_FEC_FC) 137 fec = ", BASER-FEC"; 138 139 if ((fp->active_fc & FUN_PORT_CAP_PAUSE_MASK) == FUN_PORT_CAP_PAUSE_MASK) 140 pause = ", Tx/Rx PAUSE"; 141 else if (fp->active_fc & FUN_PORT_CAP_RX_PAUSE) 142 pause = ", Rx PAUSE"; 143 else if (fp->active_fc & FUN_PORT_CAP_TX_PAUSE) 144 pause = ", Tx PAUSE"; 145 146 netdev_info(netdev, "Link up at %d %cb/s full-duplex%s%s\n", 147 speed, unit, pause, fec); 148 } else { 149 netdev_info(netdev, "Link down\n"); 150 } 151 } 152 153 static int fun_adi_write(struct fun_dev *fdev, enum fun_admin_adi_attr attr, 154 unsigned int adi_id, const struct fun_adi_param *param) 155 { 156 struct fun_admin_adi_req req = { 157 .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ADI, 158 sizeof(req)), 159 .u.write.subop = FUN_ADMIN_SUBOP_WRITE, 160 .u.write.attribute = attr, 161 .u.write.id = cpu_to_be32(adi_id), 162 .u.write.param = *param 163 }; 164 165 return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0); 166 } 167 168 /* Configure RSS for the given port. @op determines whether a new RSS context 169 * is to be created or whether an existing one should be reconfigured. The 170 * remaining parameters specify the hashing algorithm, key, and indirection 171 * table. 172 * 173 * This initiates packet delivery to the Rx queues set in the indirection 174 * table. 
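 * The hash key and indirection table are passed to the device through the
 * port's RSS DMA area referenced by the command's gather list.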
 */
int fun_config_rss(struct net_device *dev, int algo, const u8 *key,
		   const u32 *qtable, u8 op)
{
	struct funeth_priv *fp = netdev_priv(dev);
	unsigned int table_len = fp->indir_table_nentries;
	unsigned int len = FUN_ETH_RSS_MAX_KEY_SIZE + sizeof(u32) * table_len;
	struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs);
	union {
		struct {
			struct fun_admin_rss_req req;
			struct fun_dataop_gl gl;
		};
		struct fun_admin_generic_create_rsp rsp;
	} cmd;
	__be32 *indir_tab;
	u16 flags;
	int rc;

	if (op != FUN_ADMIN_SUBOP_CREATE && fp->rss_hw_id == FUN_HCI_ID_INVALID)
		return -EINVAL;

	flags = op == FUN_ADMIN_SUBOP_CREATE ?
			FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR : 0;
	cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_RSS,
						    sizeof(cmd));
	cmd.req.u.create =
		FUN_ADMIN_RSS_CREATE_REQ_INIT(op, flags, fp->rss_hw_id,
					      dev->dev_port, algo,
					      FUN_ETH_RSS_MAX_KEY_SIZE,
					      table_len, 0,
					      FUN_ETH_RSS_MAX_KEY_SIZE);
	cmd.req.u.create.dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);
	fun_dataop_gl_init(&cmd.gl, 0, 0, len, fp->rss_dma_addr);

	/* write the key and indirection table into the RSS DMA area */
	memcpy(fp->rss_cfg, key, FUN_ETH_RSS_MAX_KEY_SIZE);
	indir_tab = fp->rss_cfg + FUN_ETH_RSS_MAX_KEY_SIZE;
	for (rc = 0; rc < table_len; rc++)
		*indir_tab++ = cpu_to_be32(rxqs[*qtable++]->hw_cqid);

	rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
				       &cmd.rsp, sizeof(cmd.rsp), 0);
	if (!rc && op == FUN_ADMIN_SUBOP_CREATE)
		fp->rss_hw_id = be32_to_cpu(cmd.rsp.id);
	return rc;
}

/* Destroy the HW RSS context associated with the given port. This also stops
 * all packet delivery to our Rx queues.
 */
static void fun_destroy_rss(struct funeth_priv *fp)
{
	if (fp->rss_hw_id != FUN_HCI_ID_INVALID) {
		fun_res_destroy(fp->fdev, FUN_ADMIN_OP_RSS, 0, fp->rss_hw_id);
		fp->rss_hw_id = FUN_HCI_ID_INVALID;
	}
}

static void fun_irq_aff_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	struct fun_irq *p = container_of(notify, struct fun_irq, aff_notify);

	cpumask_copy(&p->affinity_mask, mask);
}

static void fun_irq_aff_release(struct kref __always_unused *ref)
{
}

/* Allocate an IRQ structure, assign an MSI-X index and initial affinity to it,
 * and add it to the IRQ XArray.
 */
static struct fun_irq *fun_alloc_qirq(struct funeth_priv *fp, unsigned int idx,
				      int node, unsigned int xa_idx_offset)
{
	struct fun_irq *irq;
	int cpu, res;

	cpu = cpumask_local_spread(idx, node);
	node = cpu_to_mem(cpu);

	irq = kzalloc_node(sizeof(*irq), GFP_KERNEL, node);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	res = fun_reserve_irqs(fp->fdev, 1, &irq->irq_idx);
	if (res != 1)
		goto free_irq;

	res = xa_insert(&fp->irqs, idx + xa_idx_offset, irq, GFP_KERNEL);
	if (res)
		goto release_irq;

	irq->irq = pci_irq_vector(fp->pdev, irq->irq_idx);
	cpumask_set_cpu(cpu, &irq->affinity_mask);
	irq->aff_notify.notify = fun_irq_aff_notify;
	irq->aff_notify.release = fun_irq_aff_release;
	irq->state = FUN_IRQ_INIT;
	return irq;

release_irq:
	fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
free_irq:
	kfree(irq);
	return ERR_PTR(res);
}

static void fun_free_qirq(struct funeth_priv *fp, struct fun_irq *irq)
{
	netif_napi_del(&irq->napi);
	fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
	kfree(irq);
}

/* Release the IRQs reserved for Tx/Rx queues that aren't being used. */
static void fun_prune_queue_irqs(struct net_device *dev)
{
	struct funeth_priv *fp = netdev_priv(dev);
	unsigned int nreleased = 0;
	struct fun_irq *irq;
	unsigned long idx;

	xa_for_each(&fp->irqs, idx, irq) {
		if (irq->txq || irq->rxq) /* skip those in use */
			continue;

		xa_erase(&fp->irqs, idx);
		fun_free_qirq(fp, irq);
		nreleased++;
		if (idx < fp->rx_irq_ofst)
			fp->num_tx_irqs--;
		else
			fp->num_rx_irqs--;
	}
	netif_info(fp, intr, dev, "Released %u queue IRQs\n", nreleased);
}

/* Reserve IRQs, one per queue, to accommodate the requested queue numbers @ntx
 * and @nrx. IRQs are added incrementally to those we already have.
 * We hold on to allocated IRQs until garbage collection of unused IRQs is
 * separately requested.
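 * (Unused IRQs are reclaimed separately by fun_prune_queue_irqs().)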
318 */ 319 static int fun_alloc_queue_irqs(struct net_device *dev, unsigned int ntx, 320 unsigned int nrx) 321 { 322 struct funeth_priv *fp = netdev_priv(dev); 323 int node = dev_to_node(&fp->pdev->dev); 324 struct fun_irq *irq; 325 unsigned int i; 326 327 for (i = fp->num_tx_irqs; i < ntx; i++) { 328 irq = fun_alloc_qirq(fp, i, node, 0); 329 if (IS_ERR(irq)) 330 return PTR_ERR(irq); 331 332 fp->num_tx_irqs++; 333 netif_napi_add_tx(dev, &irq->napi, fun_txq_napi_poll); 334 } 335 336 for (i = fp->num_rx_irqs; i < nrx; i++) { 337 irq = fun_alloc_qirq(fp, i, node, fp->rx_irq_ofst); 338 if (IS_ERR(irq)) 339 return PTR_ERR(irq); 340 341 fp->num_rx_irqs++; 342 netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll); 343 } 344 345 netif_info(fp, intr, dev, "Reserved %u/%u IRQs for Tx/Rx queues\n", 346 ntx, nrx); 347 return 0; 348 } 349 350 static void free_txqs(struct funeth_txq **txqs, unsigned int nqs, 351 unsigned int start, int state) 352 { 353 unsigned int i; 354 355 for (i = start; i < nqs && txqs[i]; i++) 356 txqs[i] = funeth_txq_free(txqs[i], state); 357 } 358 359 static int alloc_txqs(struct net_device *dev, struct funeth_txq **txqs, 360 unsigned int nqs, unsigned int depth, unsigned int start, 361 int state) 362 { 363 struct funeth_priv *fp = netdev_priv(dev); 364 unsigned int i; 365 int err; 366 367 for (i = start; i < nqs; i++) { 368 err = funeth_txq_create(dev, i, depth, xa_load(&fp->irqs, i), 369 state, &txqs[i]); 370 if (err) { 371 free_txqs(txqs, nqs, start, FUN_QSTATE_DESTROYED); 372 return err; 373 } 374 } 375 return 0; 376 } 377 378 static void free_rxqs(struct funeth_rxq **rxqs, unsigned int nqs, 379 unsigned int start, int state) 380 { 381 unsigned int i; 382 383 for (i = start; i < nqs && rxqs[i]; i++) 384 rxqs[i] = funeth_rxq_free(rxqs[i], state); 385 } 386 387 static int alloc_rxqs(struct net_device *dev, struct funeth_rxq **rxqs, 388 unsigned int nqs, unsigned int ncqe, unsigned int nrqe, 389 unsigned int start, int state) 390 { 391 struct funeth_priv *fp = netdev_priv(dev); 392 unsigned int i; 393 int err; 394 395 for (i = start; i < nqs; i++) { 396 err = funeth_rxq_create(dev, i, ncqe, nrqe, 397 xa_load(&fp->irqs, i + fp->rx_irq_ofst), 398 state, &rxqs[i]); 399 if (err) { 400 free_rxqs(rxqs, nqs, start, FUN_QSTATE_DESTROYED); 401 return err; 402 } 403 } 404 return 0; 405 } 406 407 static void free_xdpqs(struct funeth_txq **xdpqs, unsigned int nqs, 408 unsigned int start, int state) 409 { 410 unsigned int i; 411 412 for (i = start; i < nqs && xdpqs[i]; i++) 413 xdpqs[i] = funeth_txq_free(xdpqs[i], state); 414 415 if (state == FUN_QSTATE_DESTROYED) 416 kfree(xdpqs); 417 } 418 419 static struct funeth_txq **alloc_xdpqs(struct net_device *dev, unsigned int nqs, 420 unsigned int depth, unsigned int start, 421 int state) 422 { 423 struct funeth_txq **xdpqs; 424 unsigned int i; 425 int err; 426 427 xdpqs = kcalloc(nqs, sizeof(*xdpqs), GFP_KERNEL); 428 if (!xdpqs) 429 return ERR_PTR(-ENOMEM); 430 431 for (i = start; i < nqs; i++) { 432 err = funeth_txq_create(dev, i, depth, NULL, state, &xdpqs[i]); 433 if (err) { 434 free_xdpqs(xdpqs, nqs, start, FUN_QSTATE_DESTROYED); 435 return ERR_PTR(err); 436 } 437 } 438 return xdpqs; 439 } 440 441 static void fun_free_rings(struct net_device *netdev, struct fun_qset *qset) 442 { 443 struct funeth_priv *fp = netdev_priv(netdev); 444 struct funeth_txq **xdpqs = qset->xdpqs; 445 struct funeth_rxq **rxqs = qset->rxqs; 446 447 /* qset may not specify any queues to operate on. In that case the 448 * currently installed queues are implied. 
449 */ 450 if (!rxqs) { 451 rxqs = rtnl_dereference(fp->rxqs); 452 xdpqs = rtnl_dereference(fp->xdpqs); 453 qset->txqs = fp->txqs; 454 qset->nrxqs = netdev->real_num_rx_queues; 455 qset->ntxqs = netdev->real_num_tx_queues; 456 qset->nxdpqs = fp->num_xdpqs; 457 } 458 if (!rxqs) 459 return; 460 461 if (rxqs == rtnl_dereference(fp->rxqs)) { 462 rcu_assign_pointer(fp->rxqs, NULL); 463 rcu_assign_pointer(fp->xdpqs, NULL); 464 synchronize_net(); 465 fp->txqs = NULL; 466 } 467 468 free_rxqs(rxqs, qset->nrxqs, qset->rxq_start, qset->state); 469 free_txqs(qset->txqs, qset->ntxqs, qset->txq_start, qset->state); 470 free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, qset->state); 471 if (qset->state == FUN_QSTATE_DESTROYED) 472 kfree(rxqs); 473 474 /* Tell the caller which queues were operated on. */ 475 qset->rxqs = rxqs; 476 qset->xdpqs = xdpqs; 477 } 478 479 static int fun_alloc_rings(struct net_device *netdev, struct fun_qset *qset) 480 { 481 struct funeth_txq **xdpqs = NULL, **txqs; 482 struct funeth_rxq **rxqs; 483 int err; 484 485 err = fun_alloc_queue_irqs(netdev, qset->ntxqs, qset->nrxqs); 486 if (err) 487 return err; 488 489 rxqs = kcalloc(qset->ntxqs + qset->nrxqs, sizeof(*rxqs), GFP_KERNEL); 490 if (!rxqs) 491 return -ENOMEM; 492 493 if (qset->nxdpqs) { 494 xdpqs = alloc_xdpqs(netdev, qset->nxdpqs, qset->sq_depth, 495 qset->xdpq_start, qset->state); 496 if (IS_ERR(xdpqs)) { 497 err = PTR_ERR(xdpqs); 498 goto free_qvec; 499 } 500 } 501 502 txqs = (struct funeth_txq **)&rxqs[qset->nrxqs]; 503 err = alloc_txqs(netdev, txqs, qset->ntxqs, qset->sq_depth, 504 qset->txq_start, qset->state); 505 if (err) 506 goto free_xdpqs; 507 508 err = alloc_rxqs(netdev, rxqs, qset->nrxqs, qset->cq_depth, 509 qset->rq_depth, qset->rxq_start, qset->state); 510 if (err) 511 goto free_txqs; 512 513 qset->rxqs = rxqs; 514 qset->txqs = txqs; 515 qset->xdpqs = xdpqs; 516 return 0; 517 518 free_txqs: 519 free_txqs(txqs, qset->ntxqs, qset->txq_start, FUN_QSTATE_DESTROYED); 520 free_xdpqs: 521 free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, FUN_QSTATE_DESTROYED); 522 free_qvec: 523 kfree(rxqs); 524 return err; 525 } 526 527 /* Take queues to the next level. Presently this means creating them on the 528 * device. 
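 * On failure the entire queue set is freed.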
529 */ 530 static int fun_advance_ring_state(struct net_device *dev, struct fun_qset *qset) 531 { 532 struct funeth_priv *fp = netdev_priv(dev); 533 int i, err; 534 535 for (i = 0; i < qset->nrxqs; i++) { 536 err = fun_rxq_create_dev(qset->rxqs[i], 537 xa_load(&fp->irqs, 538 i + fp->rx_irq_ofst)); 539 if (err) 540 goto out; 541 } 542 543 for (i = 0; i < qset->ntxqs; i++) { 544 err = fun_txq_create_dev(qset->txqs[i], xa_load(&fp->irqs, i)); 545 if (err) 546 goto out; 547 } 548 549 for (i = 0; i < qset->nxdpqs; i++) { 550 err = fun_txq_create_dev(qset->xdpqs[i], NULL); 551 if (err) 552 goto out; 553 } 554 555 return 0; 556 557 out: 558 fun_free_rings(dev, qset); 559 return err; 560 } 561 562 static int fun_port_create(struct net_device *netdev) 563 { 564 struct funeth_priv *fp = netdev_priv(netdev); 565 union { 566 struct fun_admin_port_req req; 567 struct fun_admin_port_rsp rsp; 568 } cmd; 569 int rc; 570 571 if (fp->lport != INVALID_LPORT) 572 return 0; 573 574 cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT, 575 sizeof(cmd.req)); 576 cmd.req.u.create = 577 FUN_ADMIN_PORT_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, 0, 578 netdev->dev_port); 579 580 rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp, 581 sizeof(cmd.rsp), 0); 582 583 if (!rc) 584 fp->lport = be16_to_cpu(cmd.rsp.u.create.lport); 585 return rc; 586 } 587 588 static int fun_port_destroy(struct net_device *netdev) 589 { 590 struct funeth_priv *fp = netdev_priv(netdev); 591 592 if (fp->lport == INVALID_LPORT) 593 return 0; 594 595 fp->lport = INVALID_LPORT; 596 return fun_res_destroy(fp->fdev, FUN_ADMIN_OP_PORT, 0, 597 netdev->dev_port); 598 } 599 600 static int fun_eth_create(struct funeth_priv *fp) 601 { 602 union { 603 struct fun_admin_eth_req req; 604 struct fun_admin_generic_create_rsp rsp; 605 } cmd; 606 int rc; 607 608 cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ETH, 609 sizeof(cmd.req)); 610 cmd.req.u.create = FUN_ADMIN_ETH_CREATE_REQ_INIT( 611 FUN_ADMIN_SUBOP_CREATE, 612 FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR, 613 0, fp->netdev->dev_port); 614 615 rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp, 616 sizeof(cmd.rsp), 0); 617 return rc ? rc : be32_to_cpu(cmd.rsp.id); 618 } 619 620 static int fun_vi_create(struct funeth_priv *fp) 621 { 622 struct fun_admin_vi_req req = { 623 .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_VI, 624 sizeof(req)), 625 .u.create = FUN_ADMIN_VI_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, 626 0, 627 fp->netdev->dev_port, 628 fp->netdev->dev_port) 629 }; 630 631 return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0); 632 } 633 634 /* Helper to create an ETH flow and bind an SQ to it. 635 * Returns the ETH id (>= 0) on success or a negative error. 
636 */ 637 int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid) 638 { 639 int rc, ethid; 640 641 ethid = fun_eth_create(fp); 642 if (ethid >= 0) { 643 rc = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_EPSQ, sqid, 644 FUN_ADMIN_BIND_TYPE_ETH, ethid); 645 if (rc) { 646 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, ethid); 647 ethid = rc; 648 } 649 } 650 return ethid; 651 } 652 653 static irqreturn_t fun_queue_irq_handler(int irq, void *data) 654 { 655 struct fun_irq *p = data; 656 657 if (p->rxq) { 658 prefetch(p->rxq->next_cqe_info); 659 p->rxq->irq_cnt++; 660 } 661 napi_schedule_irqoff(&p->napi); 662 return IRQ_HANDLED; 663 } 664 665 static int fun_enable_irqs(struct net_device *dev) 666 { 667 struct funeth_priv *fp = netdev_priv(dev); 668 unsigned long idx, last; 669 unsigned int qidx; 670 struct fun_irq *p; 671 const char *qtype; 672 int err; 673 674 xa_for_each(&fp->irqs, idx, p) { 675 if (p->txq) { 676 qtype = "tx"; 677 qidx = p->txq->qidx; 678 } else if (p->rxq) { 679 qtype = "rx"; 680 qidx = p->rxq->qidx; 681 } else { 682 continue; 683 } 684 685 if (p->state != FUN_IRQ_INIT) 686 continue; 687 688 snprintf(p->name, sizeof(p->name) - 1, "%s-%s-%u", dev->name, 689 qtype, qidx); 690 err = request_irq(p->irq, fun_queue_irq_handler, 0, p->name, p); 691 if (err) { 692 netdev_err(dev, "Failed to allocate IRQ %u, err %d\n", 693 p->irq, err); 694 goto unroll; 695 } 696 p->state = FUN_IRQ_REQUESTED; 697 } 698 699 xa_for_each(&fp->irqs, idx, p) { 700 if (p->state != FUN_IRQ_REQUESTED) 701 continue; 702 irq_set_affinity_notifier(p->irq, &p->aff_notify); 703 irq_set_affinity_and_hint(p->irq, &p->affinity_mask); 704 napi_enable(&p->napi); 705 p->state = FUN_IRQ_ENABLED; 706 } 707 708 return 0; 709 710 unroll: 711 last = idx - 1; 712 xa_for_each_range(&fp->irqs, idx, p, 0, last) 713 if (p->state == FUN_IRQ_REQUESTED) { 714 free_irq(p->irq, p); 715 p->state = FUN_IRQ_INIT; 716 } 717 718 return err; 719 } 720 721 static void fun_disable_one_irq(struct fun_irq *irq) 722 { 723 napi_disable(&irq->napi); 724 irq_set_affinity_notifier(irq->irq, NULL); 725 irq_update_affinity_hint(irq->irq, NULL); 726 free_irq(irq->irq, irq); 727 irq->state = FUN_IRQ_INIT; 728 } 729 730 static void fun_disable_irqs(struct net_device *dev) 731 { 732 struct funeth_priv *fp = netdev_priv(dev); 733 struct fun_irq *p; 734 unsigned long idx; 735 736 xa_for_each(&fp->irqs, idx, p) 737 if (p->state == FUN_IRQ_ENABLED) 738 fun_disable_one_irq(p); 739 } 740 741 static void fun_down(struct net_device *dev, struct fun_qset *qset) 742 { 743 struct funeth_priv *fp = netdev_priv(dev); 744 745 /* If we don't have queues the data path is already down. 746 * Note netif_running(dev) may be true. 747 */ 748 if (!rcu_access_pointer(fp->rxqs)) 749 return; 750 751 /* It is also down if the queues aren't on the device. 
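 * In that case only the host-side queue state needs to be freed.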
*/ 752 if (fp->txqs[0]->init_state >= FUN_QSTATE_INIT_FULL) { 753 netif_info(fp, ifdown, dev, 754 "Tearing down data path on device\n"); 755 fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_DISABLE, 0); 756 757 netif_carrier_off(dev); 758 netif_tx_disable(dev); 759 760 fun_destroy_rss(fp); 761 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port); 762 fun_disable_irqs(dev); 763 } 764 765 fun_free_rings(dev, qset); 766 } 767 768 static int fun_up(struct net_device *dev, struct fun_qset *qset) 769 { 770 static const int port_keys[] = { 771 FUN_ADMIN_PORT_KEY_STATS_DMA_LOW, 772 FUN_ADMIN_PORT_KEY_STATS_DMA_HIGH, 773 FUN_ADMIN_PORT_KEY_ENABLE 774 }; 775 776 struct funeth_priv *fp = netdev_priv(dev); 777 u64 vals[] = { 778 lower_32_bits(fp->stats_dma_addr), 779 upper_32_bits(fp->stats_dma_addr), 780 FUN_PORT_FLAG_ENABLE_NOTIFY 781 }; 782 int err; 783 784 netif_info(fp, ifup, dev, "Setting up data path on device\n"); 785 786 if (qset->rxqs[0]->init_state < FUN_QSTATE_INIT_FULL) { 787 err = fun_advance_ring_state(dev, qset); 788 if (err) 789 return err; 790 } 791 792 err = fun_vi_create(fp); 793 if (err) 794 goto free_queues; 795 796 fp->txqs = qset->txqs; 797 rcu_assign_pointer(fp->rxqs, qset->rxqs); 798 rcu_assign_pointer(fp->xdpqs, qset->xdpqs); 799 800 err = fun_enable_irqs(dev); 801 if (err) 802 goto destroy_vi; 803 804 if (fp->rss_cfg) { 805 err = fun_config_rss(dev, fp->hash_algo, fp->rss_key, 806 fp->indir_table, FUN_ADMIN_SUBOP_CREATE); 807 } else { 808 /* The non-RSS case has only 1 queue. */ 809 err = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_VI, dev->dev_port, 810 FUN_ADMIN_BIND_TYPE_EPCQ, 811 qset->rxqs[0]->hw_cqid); 812 } 813 if (err) 814 goto disable_irqs; 815 816 err = fun_port_write_cmds(fp, 3, port_keys, vals); 817 if (err) 818 goto free_rss; 819 820 netif_tx_start_all_queues(dev); 821 return 0; 822 823 free_rss: 824 fun_destroy_rss(fp); 825 disable_irqs: 826 fun_disable_irqs(dev); 827 destroy_vi: 828 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port); 829 free_queues: 830 fun_free_rings(dev, qset); 831 return err; 832 } 833 834 static int funeth_open(struct net_device *netdev) 835 { 836 struct funeth_priv *fp = netdev_priv(netdev); 837 struct fun_qset qset = { 838 .nrxqs = netdev->real_num_rx_queues, 839 .ntxqs = netdev->real_num_tx_queues, 840 .nxdpqs = fp->num_xdpqs, 841 .cq_depth = fp->cq_depth, 842 .rq_depth = fp->rq_depth, 843 .sq_depth = fp->sq_depth, 844 .state = FUN_QSTATE_INIT_FULL, 845 }; 846 int rc; 847 848 rc = fun_alloc_rings(netdev, &qset); 849 if (rc) 850 return rc; 851 852 rc = fun_up(netdev, &qset); 853 if (rc) { 854 qset.state = FUN_QSTATE_DESTROYED; 855 fun_free_rings(netdev, &qset); 856 } 857 858 return rc; 859 } 860 861 static int funeth_close(struct net_device *netdev) 862 { 863 struct fun_qset qset = { .state = FUN_QSTATE_DESTROYED }; 864 865 fun_down(netdev, &qset); 866 return 0; 867 } 868 869 static void fun_get_stats64(struct net_device *netdev, 870 struct rtnl_link_stats64 *stats) 871 { 872 struct funeth_priv *fp = netdev_priv(netdev); 873 struct funeth_txq **xdpqs; 874 struct funeth_rxq **rxqs; 875 unsigned int i, start; 876 877 stats->tx_packets = fp->tx_packets; 878 stats->tx_bytes = fp->tx_bytes; 879 stats->tx_dropped = fp->tx_dropped; 880 881 stats->rx_packets = fp->rx_packets; 882 stats->rx_bytes = fp->rx_bytes; 883 stats->rx_dropped = fp->rx_dropped; 884 885 rcu_read_lock(); 886 rxqs = rcu_dereference(fp->rxqs); 887 if (!rxqs) 888 goto unlock; 889 890 for (i = 0; i < netdev->real_num_tx_queues; i++) { 891 struct funeth_txq_stats txs; 892 
893 FUN_QSTAT_READ(fp->txqs[i], start, txs); 894 stats->tx_packets += txs.tx_pkts; 895 stats->tx_bytes += txs.tx_bytes; 896 stats->tx_dropped += txs.tx_map_err; 897 } 898 899 for (i = 0; i < netdev->real_num_rx_queues; i++) { 900 struct funeth_rxq_stats rxs; 901 902 FUN_QSTAT_READ(rxqs[i], start, rxs); 903 stats->rx_packets += rxs.rx_pkts; 904 stats->rx_bytes += rxs.rx_bytes; 905 stats->rx_dropped += rxs.rx_map_err + rxs.rx_mem_drops; 906 } 907 908 xdpqs = rcu_dereference(fp->xdpqs); 909 if (!xdpqs) 910 goto unlock; 911 912 for (i = 0; i < fp->num_xdpqs; i++) { 913 struct funeth_txq_stats txs; 914 915 FUN_QSTAT_READ(xdpqs[i], start, txs); 916 stats->tx_packets += txs.tx_pkts; 917 stats->tx_bytes += txs.tx_bytes; 918 } 919 unlock: 920 rcu_read_unlock(); 921 } 922 923 static int fun_change_mtu(struct net_device *netdev, int new_mtu) 924 { 925 struct funeth_priv *fp = netdev_priv(netdev); 926 int rc; 927 928 rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MTU, new_mtu); 929 if (!rc) 930 netdev->mtu = new_mtu; 931 return rc; 932 } 933 934 static int fun_set_macaddr(struct net_device *netdev, void *addr) 935 { 936 struct funeth_priv *fp = netdev_priv(netdev); 937 struct sockaddr *saddr = addr; 938 int rc; 939 940 if (!is_valid_ether_addr(saddr->sa_data)) 941 return -EADDRNOTAVAIL; 942 943 if (ether_addr_equal(netdev->dev_addr, saddr->sa_data)) 944 return 0; 945 946 rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR, 947 ether_addr_to_u64(saddr->sa_data)); 948 if (!rc) 949 eth_hw_addr_set(netdev, saddr->sa_data); 950 return rc; 951 } 952 953 static int fun_get_port_attributes(struct net_device *netdev) 954 { 955 static const int keys[] = { 956 FUN_ADMIN_PORT_KEY_MACADDR, FUN_ADMIN_PORT_KEY_CAPABILITIES, 957 FUN_ADMIN_PORT_KEY_ADVERT, FUN_ADMIN_PORT_KEY_MTU 958 }; 959 static const int phys_keys[] = { 960 FUN_ADMIN_PORT_KEY_LANE_ATTRS, 961 }; 962 963 struct funeth_priv *fp = netdev_priv(netdev); 964 u64 data[ARRAY_SIZE(keys)]; 965 u8 mac[ETH_ALEN]; 966 int i, rc; 967 968 rc = fun_port_read_cmds(fp, ARRAY_SIZE(keys), keys, data); 969 if (rc) 970 return rc; 971 972 for (i = 0; i < ARRAY_SIZE(keys); i++) { 973 switch (keys[i]) { 974 case FUN_ADMIN_PORT_KEY_MACADDR: 975 u64_to_ether_addr(data[i], mac); 976 if (is_zero_ether_addr(mac)) { 977 eth_hw_addr_random(netdev); 978 } else if (is_valid_ether_addr(mac)) { 979 eth_hw_addr_set(netdev, mac); 980 } else { 981 netdev_err(netdev, 982 "device provided a bad MAC address %pM\n", 983 mac); 984 return -EINVAL; 985 } 986 break; 987 988 case FUN_ADMIN_PORT_KEY_CAPABILITIES: 989 fp->port_caps = data[i]; 990 break; 991 992 case FUN_ADMIN_PORT_KEY_ADVERT: 993 fp->advertising = data[i]; 994 break; 995 996 case FUN_ADMIN_PORT_KEY_MTU: 997 netdev->mtu = data[i]; 998 break; 999 } 1000 } 1001 1002 if (!(fp->port_caps & FUN_PORT_CAP_VPORT)) { 1003 rc = fun_port_read_cmds(fp, ARRAY_SIZE(phys_keys), phys_keys, 1004 data); 1005 if (rc) 1006 return rc; 1007 1008 fp->lane_attrs = data[0]; 1009 } 1010 1011 if (netdev->addr_assign_type == NET_ADDR_RANDOM) 1012 return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR, 1013 ether_addr_to_u64(netdev->dev_addr)); 1014 return 0; 1015 } 1016 1017 static int fun_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 1018 { 1019 const struct funeth_priv *fp = netdev_priv(dev); 1020 1021 return copy_to_user(ifr->ifr_data, &fp->hwtstamp_cfg, 1022 sizeof(fp->hwtstamp_cfg)) ? 
-EFAULT : 0; 1023 } 1024 1025 static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 1026 { 1027 struct funeth_priv *fp = netdev_priv(dev); 1028 struct hwtstamp_config cfg; 1029 1030 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) 1031 return -EFAULT; 1032 1033 /* no TX HW timestamps */ 1034 cfg.tx_type = HWTSTAMP_TX_OFF; 1035 1036 switch (cfg.rx_filter) { 1037 case HWTSTAMP_FILTER_NONE: 1038 break; 1039 case HWTSTAMP_FILTER_ALL: 1040 case HWTSTAMP_FILTER_SOME: 1041 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 1042 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 1043 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 1044 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 1045 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 1046 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 1047 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 1048 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 1049 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 1050 case HWTSTAMP_FILTER_PTP_V2_EVENT: 1051 case HWTSTAMP_FILTER_PTP_V2_SYNC: 1052 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 1053 case HWTSTAMP_FILTER_NTP_ALL: 1054 cfg.rx_filter = HWTSTAMP_FILTER_ALL; 1055 break; 1056 default: 1057 return -ERANGE; 1058 } 1059 1060 fp->hwtstamp_cfg = cfg; 1061 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; 1062 } 1063 1064 static int fun_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1065 { 1066 switch (cmd) { 1067 case SIOCSHWTSTAMP: 1068 return fun_hwtstamp_set(dev, ifr); 1069 case SIOCGHWTSTAMP: 1070 return fun_hwtstamp_get(dev, ifr); 1071 default: 1072 return -EOPNOTSUPP; 1073 } 1074 } 1075 1076 /* Prepare the queues for XDP. */ 1077 static int fun_enter_xdp(struct net_device *dev, struct bpf_prog *prog) 1078 { 1079 struct funeth_priv *fp = netdev_priv(dev); 1080 unsigned int i, nqs = num_online_cpus(); 1081 struct funeth_txq **xdpqs; 1082 struct funeth_rxq **rxqs; 1083 int err; 1084 1085 xdpqs = alloc_xdpqs(dev, nqs, fp->sq_depth, 0, FUN_QSTATE_INIT_FULL); 1086 if (IS_ERR(xdpqs)) 1087 return PTR_ERR(xdpqs); 1088 1089 rxqs = rtnl_dereference(fp->rxqs); 1090 for (i = 0; i < dev->real_num_rx_queues; i++) { 1091 err = fun_rxq_set_bpf(rxqs[i], prog); 1092 if (err) 1093 goto out; 1094 } 1095 1096 fp->num_xdpqs = nqs; 1097 rcu_assign_pointer(fp->xdpqs, xdpqs); 1098 return 0; 1099 out: 1100 while (i--) 1101 fun_rxq_set_bpf(rxqs[i], NULL); 1102 1103 free_xdpqs(xdpqs, nqs, 0, FUN_QSTATE_DESTROYED); 1104 return err; 1105 } 1106 1107 /* Set the queues for non-XDP operation. 
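 * The XDP Tx queues are freed and the Rx queues drop their XDP programs.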
*/ 1108 static void fun_end_xdp(struct net_device *dev) 1109 { 1110 struct funeth_priv *fp = netdev_priv(dev); 1111 struct funeth_txq **xdpqs; 1112 struct funeth_rxq **rxqs; 1113 unsigned int i; 1114 1115 xdpqs = rtnl_dereference(fp->xdpqs); 1116 rcu_assign_pointer(fp->xdpqs, NULL); 1117 synchronize_net(); 1118 /* at this point both Rx and Tx XDP processing has ended */ 1119 1120 free_xdpqs(xdpqs, fp->num_xdpqs, 0, FUN_QSTATE_DESTROYED); 1121 fp->num_xdpqs = 0; 1122 1123 rxqs = rtnl_dereference(fp->rxqs); 1124 for (i = 0; i < dev->real_num_rx_queues; i++) 1125 fun_rxq_set_bpf(rxqs[i], NULL); 1126 } 1127 1128 #define XDP_MAX_MTU \ 1129 (PAGE_SIZE - FUN_XDP_HEADROOM - VLAN_ETH_HLEN - FUN_RX_TAILROOM) 1130 1131 static int fun_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp) 1132 { 1133 struct bpf_prog *old_prog, *prog = xdp->prog; 1134 struct funeth_priv *fp = netdev_priv(dev); 1135 int i, err; 1136 1137 /* XDP uses at most one buffer */ 1138 if (prog && dev->mtu > XDP_MAX_MTU) { 1139 netdev_err(dev, "device MTU %u too large for XDP\n", dev->mtu); 1140 NL_SET_ERR_MSG_MOD(xdp->extack, 1141 "Device MTU too large for XDP"); 1142 return -EINVAL; 1143 } 1144 1145 if (!netif_running(dev)) { 1146 fp->num_xdpqs = prog ? num_online_cpus() : 0; 1147 } else if (prog && !fp->xdp_prog) { 1148 err = fun_enter_xdp(dev, prog); 1149 if (err) { 1150 NL_SET_ERR_MSG_MOD(xdp->extack, 1151 "Failed to set queues for XDP."); 1152 return err; 1153 } 1154 } else if (!prog && fp->xdp_prog) { 1155 fun_end_xdp(dev); 1156 } else { 1157 struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs); 1158 1159 for (i = 0; i < dev->real_num_rx_queues; i++) 1160 WRITE_ONCE(rxqs[i]->xdp_prog, prog); 1161 } 1162 1163 dev->max_mtu = prog ? XDP_MAX_MTU : FUN_MAX_MTU; 1164 old_prog = xchg(&fp->xdp_prog, prog); 1165 if (old_prog) 1166 bpf_prog_put(old_prog); 1167 1168 return 0; 1169 } 1170 1171 static int fun_xdp(struct net_device *dev, struct netdev_bpf *xdp) 1172 { 1173 switch (xdp->command) { 1174 case XDP_SETUP_PROG: 1175 return fun_xdp_setup(dev, xdp); 1176 default: 1177 return -EINVAL; 1178 } 1179 } 1180 1181 static struct devlink_port *fun_get_devlink_port(struct net_device *netdev) 1182 { 1183 struct funeth_priv *fp = netdev_priv(netdev); 1184 1185 return &fp->dl_port; 1186 } 1187 1188 static int fun_init_vports(struct fun_ethdev *ed, unsigned int n) 1189 { 1190 if (ed->num_vports) 1191 return -EINVAL; 1192 1193 ed->vport_info = kvcalloc(n, sizeof(*ed->vport_info), GFP_KERNEL); 1194 if (!ed->vport_info) 1195 return -ENOMEM; 1196 ed->num_vports = n; 1197 return 0; 1198 } 1199 1200 static void fun_free_vports(struct fun_ethdev *ed) 1201 { 1202 kvfree(ed->vport_info); 1203 ed->vport_info = NULL; 1204 ed->num_vports = 0; 1205 } 1206 1207 static struct fun_vport_info *fun_get_vport(struct fun_ethdev *ed, 1208 unsigned int vport) 1209 { 1210 if (!ed->vport_info || vport >= ed->num_vports) 1211 return NULL; 1212 1213 return ed->vport_info + vport; 1214 } 1215 1216 static int fun_set_vf_mac(struct net_device *dev, int vf, u8 *mac) 1217 { 1218 struct funeth_priv *fp = netdev_priv(dev); 1219 struct fun_adi_param mac_param = {}; 1220 struct fun_dev *fdev = fp->fdev; 1221 struct fun_ethdev *ed = to_fun_ethdev(fdev); 1222 struct fun_vport_info *vi; 1223 int rc = -EINVAL; 1224 1225 if (is_multicast_ether_addr(mac)) 1226 return -EINVAL; 1227 1228 mutex_lock(&ed->state_mutex); 1229 vi = fun_get_vport(ed, vf); 1230 if (!vi) 1231 goto unlock; 1232 1233 mac_param.u.mac = FUN_ADI_MAC_INIT(ether_addr_to_u64(mac)); 1234 rc = fun_adi_write(fdev, 
FUN_ADMIN_ADI_ATTR_MACADDR, vf + 1, 1235 &mac_param); 1236 if (!rc) 1237 ether_addr_copy(vi->mac, mac); 1238 unlock: 1239 mutex_unlock(&ed->state_mutex); 1240 return rc; 1241 } 1242 1243 static int fun_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, 1244 __be16 vlan_proto) 1245 { 1246 struct funeth_priv *fp = netdev_priv(dev); 1247 struct fun_adi_param vlan_param = {}; 1248 struct fun_dev *fdev = fp->fdev; 1249 struct fun_ethdev *ed = to_fun_ethdev(fdev); 1250 struct fun_vport_info *vi; 1251 int rc = -EINVAL; 1252 1253 if (vlan > 4095 || qos > 7) 1254 return -EINVAL; 1255 if (vlan_proto && vlan_proto != htons(ETH_P_8021Q) && 1256 vlan_proto != htons(ETH_P_8021AD)) 1257 return -EINVAL; 1258 1259 mutex_lock(&ed->state_mutex); 1260 vi = fun_get_vport(ed, vf); 1261 if (!vi) 1262 goto unlock; 1263 1264 vlan_param.u.vlan = FUN_ADI_VLAN_INIT(be16_to_cpu(vlan_proto), 1265 ((u16)qos << VLAN_PRIO_SHIFT) | vlan); 1266 rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_VLAN, vf + 1, &vlan_param); 1267 if (!rc) { 1268 vi->vlan = vlan; 1269 vi->qos = qos; 1270 vi->vlan_proto = vlan_proto; 1271 } 1272 unlock: 1273 mutex_unlock(&ed->state_mutex); 1274 return rc; 1275 } 1276 1277 static int fun_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, 1278 int max_tx_rate) 1279 { 1280 struct funeth_priv *fp = netdev_priv(dev); 1281 struct fun_adi_param rate_param = {}; 1282 struct fun_dev *fdev = fp->fdev; 1283 struct fun_ethdev *ed = to_fun_ethdev(fdev); 1284 struct fun_vport_info *vi; 1285 int rc = -EINVAL; 1286 1287 if (min_tx_rate) 1288 return -EINVAL; 1289 1290 mutex_lock(&ed->state_mutex); 1291 vi = fun_get_vport(ed, vf); 1292 if (!vi) 1293 goto unlock; 1294 1295 rate_param.u.rate = FUN_ADI_RATE_INIT(max_tx_rate); 1296 rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_RATE, vf + 1, &rate_param); 1297 if (!rc) 1298 vi->max_rate = max_tx_rate; 1299 unlock: 1300 mutex_unlock(&ed->state_mutex); 1301 return rc; 1302 } 1303 1304 static int fun_get_vf_config(struct net_device *dev, int vf, 1305 struct ifla_vf_info *ivi) 1306 { 1307 struct funeth_priv *fp = netdev_priv(dev); 1308 struct fun_ethdev *ed = to_fun_ethdev(fp->fdev); 1309 const struct fun_vport_info *vi; 1310 1311 mutex_lock(&ed->state_mutex); 1312 vi = fun_get_vport(ed, vf); 1313 if (!vi) 1314 goto unlock; 1315 1316 memset(ivi, 0, sizeof(*ivi)); 1317 ivi->vf = vf; 1318 ether_addr_copy(ivi->mac, vi->mac); 1319 ivi->vlan = vi->vlan; 1320 ivi->qos = vi->qos; 1321 ivi->vlan_proto = vi->vlan_proto; 1322 ivi->max_tx_rate = vi->max_rate; 1323 ivi->spoofchk = vi->spoofchk; 1324 unlock: 1325 mutex_unlock(&ed->state_mutex); 1326 return vi ? 
0 : -EINVAL; 1327 } 1328 1329 static void fun_uninit(struct net_device *dev) 1330 { 1331 struct funeth_priv *fp = netdev_priv(dev); 1332 1333 fun_prune_queue_irqs(dev); 1334 xa_destroy(&fp->irqs); 1335 } 1336 1337 static const struct net_device_ops fun_netdev_ops = { 1338 .ndo_open = funeth_open, 1339 .ndo_stop = funeth_close, 1340 .ndo_start_xmit = fun_start_xmit, 1341 .ndo_get_stats64 = fun_get_stats64, 1342 .ndo_change_mtu = fun_change_mtu, 1343 .ndo_set_mac_address = fun_set_macaddr, 1344 .ndo_validate_addr = eth_validate_addr, 1345 .ndo_eth_ioctl = fun_ioctl, 1346 .ndo_uninit = fun_uninit, 1347 .ndo_bpf = fun_xdp, 1348 .ndo_xdp_xmit = fun_xdp_xmit_frames, 1349 .ndo_set_vf_mac = fun_set_vf_mac, 1350 .ndo_set_vf_vlan = fun_set_vf_vlan, 1351 .ndo_set_vf_rate = fun_set_vf_rate, 1352 .ndo_get_vf_config = fun_get_vf_config, 1353 .ndo_get_devlink_port = fun_get_devlink_port, 1354 }; 1355 1356 #define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \ 1357 NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | \ 1358 NETIF_F_GSO_UDP_TUNNEL_CSUM) 1359 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \ 1360 NETIF_F_GSO_UDP_L4) 1361 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_HW_CSUM | TSO_FLAGS | \ 1362 GSO_ENCAP_FLAGS | NETIF_F_HIGHDMA) 1363 1364 static void fun_dflt_rss_indir(struct funeth_priv *fp, unsigned int nrx) 1365 { 1366 unsigned int i; 1367 1368 for (i = 0; i < fp->indir_table_nentries; i++) 1369 fp->indir_table[i] = ethtool_rxfh_indir_default(i, nrx); 1370 } 1371 1372 /* Reset the RSS indirection table to equal distribution across the current 1373 * number of Rx queues. Called at init time and whenever the number of Rx 1374 * queues changes subsequently. Note that this may also resize the indirection 1375 * table. 1376 */ 1377 static void fun_reset_rss_indir(struct net_device *dev, unsigned int nrx) 1378 { 1379 struct funeth_priv *fp = netdev_priv(dev); 1380 1381 if (!fp->rss_cfg) 1382 return; 1383 1384 /* Set the table size to the max possible that allows an equal number 1385 * of occurrences of each CQ. 1386 */ 1387 fp->indir_table_nentries = rounddown(FUN_ETH_RSS_MAX_INDIR_ENT, nrx); 1388 fun_dflt_rss_indir(fp, nrx); 1389 } 1390 1391 /* Update the RSS LUT to contain only queues in [0, nrx). Normally this will 1392 * update the LUT to an equal distribution among nrx queues, If @only_if_needed 1393 * is set the LUT is left unchanged if it already does not reference any queues 1394 * >= nrx. 1395 */ 1396 static int fun_rss_set_qnum(struct net_device *dev, unsigned int nrx, 1397 bool only_if_needed) 1398 { 1399 struct funeth_priv *fp = netdev_priv(dev); 1400 u32 old_lut[FUN_ETH_RSS_MAX_INDIR_ENT]; 1401 unsigned int i, oldsz; 1402 int err; 1403 1404 if (!fp->rss_cfg) 1405 return 0; 1406 1407 if (only_if_needed) { 1408 for (i = 0; i < fp->indir_table_nentries; i++) 1409 if (fp->indir_table[i] >= nrx) 1410 break; 1411 1412 if (i >= fp->indir_table_nentries) 1413 return 0; 1414 } 1415 1416 memcpy(old_lut, fp->indir_table, sizeof(old_lut)); 1417 oldsz = fp->indir_table_nentries; 1418 fun_reset_rss_indir(dev, nrx); 1419 1420 err = fun_config_rss(dev, fp->hash_algo, fp->rss_key, 1421 fp->indir_table, FUN_ADMIN_SUBOP_MODIFY); 1422 if (!err) 1423 return 0; 1424 1425 memcpy(fp->indir_table, old_lut, sizeof(old_lut)); 1426 fp->indir_table_nentries = oldsz; 1427 return err; 1428 } 1429 1430 /* Allocate the DMA area for the RSS configuration commands to the device, and 1431 * initialize the hash, hash key, indirection table size and its entries to 1432 * their defaults. 
The indirection table defaults to equal distribution across
 * the Rx queues.
 */
static int fun_init_rss(struct net_device *dev)
{
	struct funeth_priv *fp = netdev_priv(dev);
	size_t size = sizeof(fp->rss_key) + sizeof(fp->indir_table);

	fp->rss_hw_id = FUN_HCI_ID_INVALID;
	if (!(fp->port_caps & FUN_PORT_CAP_OFFLOADS))
		return 0;

	fp->rss_cfg = dma_alloc_coherent(&fp->pdev->dev, size,
					 &fp->rss_dma_addr, GFP_KERNEL);
	if (!fp->rss_cfg)
		return -ENOMEM;

	fp->hash_algo = FUN_ETH_RSS_ALG_TOEPLITZ;
	netdev_rss_key_fill(fp->rss_key, sizeof(fp->rss_key));
	fun_reset_rss_indir(dev, dev->real_num_rx_queues);
	return 0;
}

static void fun_free_rss(struct funeth_priv *fp)
{
	if (fp->rss_cfg) {
		dma_free_coherent(&fp->pdev->dev,
				  sizeof(fp->rss_key) + sizeof(fp->indir_table),
				  fp->rss_cfg, fp->rss_dma_addr);
		fp->rss_cfg = NULL;
	}
}

void fun_set_ring_count(struct net_device *netdev, unsigned int ntx,
			unsigned int nrx)
{
	netif_set_real_num_tx_queues(netdev, ntx);
	if (nrx != netdev->real_num_rx_queues) {
		netif_set_real_num_rx_queues(netdev, nrx);
		fun_reset_rss_indir(netdev, nrx);
	}
}

static int fun_init_stats_area(struct funeth_priv *fp)
{
	unsigned int nstats;

	if (!(fp->port_caps & FUN_PORT_CAP_STATS))
		return 0;

	nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX +
		 PORT_MAC_FEC_STATS_MAX;

	fp->stats = dma_alloc_coherent(&fp->pdev->dev, nstats * sizeof(u64),
				       &fp->stats_dma_addr, GFP_KERNEL);
	if (!fp->stats)
		return -ENOMEM;
	return 0;
}

static void fun_free_stats_area(struct funeth_priv *fp)
{
	unsigned int nstats;

	if (fp->stats) {
		/* free with the same size that was allocated above */
		nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX +
			 PORT_MAC_FEC_STATS_MAX;
		dma_free_coherent(&fp->pdev->dev, nstats * sizeof(u64),
				  fp->stats, fp->stats_dma_addr);
		fp->stats = NULL;
	}
}

static int fun_dl_port_register(struct net_device *netdev)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	struct devlink *dl = priv_to_devlink(fp->fdev);
	struct devlink_port_attrs attrs = {};
	unsigned int idx;

	if (fp->port_caps & FUN_PORT_CAP_VPORT) {
		attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
		idx = fp->lport;
	} else {
		idx = netdev->dev_port;
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		attrs.lanes = fp->lane_attrs & 7;
		if (fp->lane_attrs & FUN_PORT_LANE_SPLIT) {
			attrs.split = 1;
			attrs.phys.port_number = fp->lport & ~3;
			attrs.phys.split_subport_number = fp->lport & 3;
		} else {
			attrs.phys.port_number = fp->lport;
		}
	}

	devlink_port_attrs_set(&fp->dl_port, &attrs);

	return devlink_port_register(dl, &fp->dl_port, idx);
}

/* Determine the max Tx/Rx queues for a port. */
static int fun_max_qs(struct fun_ethdev *ed, unsigned int *ntx,
		      unsigned int *nrx)
{
	int neth;

	if (ed->num_ports > 1 || is_kdump_kernel()) {
		*ntx = 1;
		*nrx = 1;
		return 0;
	}

	neth = fun_get_res_count(&ed->fdev, FUN_ADMIN_OP_ETH);
	if (neth < 0)
		return neth;

	/* We determine the max number of queues based on the CPU
	 * cores, device interrupts and queues, RSS size, and device Tx flows.
	 *
	 * - At least 1 Rx and 1 Tx queue.
	 * - At most 1 Rx/Tx queue per core.
1553 * - Each Rx/Tx queue needs 1 SQ. 1554 */ 1555 *ntx = min(ed->nsqs_per_port - 1, num_online_cpus()); 1556 *nrx = *ntx; 1557 if (*ntx > neth) 1558 *ntx = neth; 1559 if (*nrx > FUN_ETH_RSS_MAX_INDIR_ENT) 1560 *nrx = FUN_ETH_RSS_MAX_INDIR_ENT; 1561 return 0; 1562 } 1563 1564 static void fun_queue_defaults(struct net_device *dev, unsigned int nsqs) 1565 { 1566 unsigned int ntx, nrx; 1567 1568 ntx = min(dev->num_tx_queues, FUN_DFLT_QUEUES); 1569 nrx = min(dev->num_rx_queues, FUN_DFLT_QUEUES); 1570 if (ntx <= nrx) { 1571 ntx = min(ntx, nsqs / 2); 1572 nrx = min(nrx, nsqs - ntx); 1573 } else { 1574 nrx = min(nrx, nsqs / 2); 1575 ntx = min(ntx, nsqs - nrx); 1576 } 1577 1578 netif_set_real_num_tx_queues(dev, ntx); 1579 netif_set_real_num_rx_queues(dev, nrx); 1580 } 1581 1582 /* Replace the existing Rx/Tx/XDP queues with equal number of queues with 1583 * different settings, e.g. depth. This is a disruptive replacement that 1584 * temporarily shuts down the data path and should be limited to changes that 1585 * can't be applied to live queues. The old queues are always discarded. 1586 */ 1587 int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs, 1588 struct netlink_ext_ack *extack) 1589 { 1590 struct fun_qset oldqs = { .state = FUN_QSTATE_DESTROYED }; 1591 struct funeth_priv *fp = netdev_priv(dev); 1592 int err; 1593 1594 newqs->nrxqs = dev->real_num_rx_queues; 1595 newqs->ntxqs = dev->real_num_tx_queues; 1596 newqs->nxdpqs = fp->num_xdpqs; 1597 newqs->state = FUN_QSTATE_INIT_SW; 1598 err = fun_alloc_rings(dev, newqs); 1599 if (err) { 1600 NL_SET_ERR_MSG_MOD(extack, 1601 "Unable to allocate memory for new queues, keeping current settings"); 1602 return err; 1603 } 1604 1605 fun_down(dev, &oldqs); 1606 1607 err = fun_up(dev, newqs); 1608 if (!err) 1609 return 0; 1610 1611 /* The new queues couldn't be installed. We do not retry the old queues 1612 * as they are the same to the device as the new queues and would 1613 * similarly fail. 1614 */ 1615 newqs->state = FUN_QSTATE_DESTROYED; 1616 fun_free_rings(dev, newqs); 1617 NL_SET_ERR_MSG_MOD(extack, "Unable to restore the data path with the new queues."); 1618 return err; 1619 } 1620 1621 /* Change the number of Rx/Tx queues of a device while it is up. This is done 1622 * by incrementally adding/removing queues to meet the new requirements while 1623 * handling ongoing traffic. 
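 * Queues common to the old and new sets are kept; only the difference is
 * allocated or freed.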
1624 */ 1625 int fun_change_num_queues(struct net_device *dev, unsigned int ntx, 1626 unsigned int nrx) 1627 { 1628 unsigned int keep_tx = min(dev->real_num_tx_queues, ntx); 1629 unsigned int keep_rx = min(dev->real_num_rx_queues, nrx); 1630 struct funeth_priv *fp = netdev_priv(dev); 1631 struct fun_qset oldqs = { 1632 .rxqs = rtnl_dereference(fp->rxqs), 1633 .txqs = fp->txqs, 1634 .nrxqs = dev->real_num_rx_queues, 1635 .ntxqs = dev->real_num_tx_queues, 1636 .rxq_start = keep_rx, 1637 .txq_start = keep_tx, 1638 .state = FUN_QSTATE_DESTROYED 1639 }; 1640 struct fun_qset newqs = { 1641 .nrxqs = nrx, 1642 .ntxqs = ntx, 1643 .rxq_start = keep_rx, 1644 .txq_start = keep_tx, 1645 .cq_depth = fp->cq_depth, 1646 .rq_depth = fp->rq_depth, 1647 .sq_depth = fp->sq_depth, 1648 .state = FUN_QSTATE_INIT_FULL 1649 }; 1650 int i, err; 1651 1652 err = fun_alloc_rings(dev, &newqs); 1653 if (err) 1654 goto free_irqs; 1655 1656 err = fun_enable_irqs(dev); /* of any newly added queues */ 1657 if (err) 1658 goto free_rings; 1659 1660 /* copy the queues we are keeping to the new set */ 1661 memcpy(newqs.rxqs, oldqs.rxqs, keep_rx * sizeof(*oldqs.rxqs)); 1662 memcpy(newqs.txqs, fp->txqs, keep_tx * sizeof(*fp->txqs)); 1663 1664 if (nrx < dev->real_num_rx_queues) { 1665 err = fun_rss_set_qnum(dev, nrx, true); 1666 if (err) 1667 goto disable_tx_irqs; 1668 1669 for (i = nrx; i < dev->real_num_rx_queues; i++) 1670 fun_disable_one_irq(container_of(oldqs.rxqs[i]->napi, 1671 struct fun_irq, napi)); 1672 1673 netif_set_real_num_rx_queues(dev, nrx); 1674 } 1675 1676 if (ntx < dev->real_num_tx_queues) 1677 netif_set_real_num_tx_queues(dev, ntx); 1678 1679 rcu_assign_pointer(fp->rxqs, newqs.rxqs); 1680 fp->txqs = newqs.txqs; 1681 synchronize_net(); 1682 1683 if (ntx > dev->real_num_tx_queues) 1684 netif_set_real_num_tx_queues(dev, ntx); 1685 1686 if (nrx > dev->real_num_rx_queues) { 1687 netif_set_real_num_rx_queues(dev, nrx); 1688 fun_rss_set_qnum(dev, nrx, false); 1689 } 1690 1691 /* disable interrupts of any excess Tx queues */ 1692 for (i = keep_tx; i < oldqs.ntxqs; i++) 1693 fun_disable_one_irq(oldqs.txqs[i]->irq); 1694 1695 fun_free_rings(dev, &oldqs); 1696 fun_prune_queue_irqs(dev); 1697 return 0; 1698 1699 disable_tx_irqs: 1700 for (i = oldqs.ntxqs; i < ntx; i++) 1701 fun_disable_one_irq(newqs.txqs[i]->irq); 1702 free_rings: 1703 newqs.state = FUN_QSTATE_DESTROYED; 1704 fun_free_rings(dev, &newqs); 1705 free_irqs: 1706 fun_prune_queue_irqs(dev); 1707 return err; 1708 } 1709 1710 static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid) 1711 { 1712 struct fun_dev *fdev = &ed->fdev; 1713 struct net_device *netdev; 1714 struct funeth_priv *fp; 1715 unsigned int ntx, nrx; 1716 int rc; 1717 1718 rc = fun_max_qs(ed, &ntx, &nrx); 1719 if (rc) 1720 return rc; 1721 1722 netdev = alloc_etherdev_mqs(sizeof(*fp), ntx, nrx); 1723 if (!netdev) { 1724 rc = -ENOMEM; 1725 goto done; 1726 } 1727 1728 netdev->dev_port = portid; 1729 fun_queue_defaults(netdev, ed->nsqs_per_port); 1730 1731 fp = netdev_priv(netdev); 1732 fp->fdev = fdev; 1733 fp->pdev = to_pci_dev(fdev->dev); 1734 fp->netdev = netdev; 1735 xa_init(&fp->irqs); 1736 fp->rx_irq_ofst = ntx; 1737 seqcount_init(&fp->link_seq); 1738 1739 fp->lport = INVALID_LPORT; 1740 rc = fun_port_create(netdev); 1741 if (rc) 1742 goto free_netdev; 1743 1744 /* bind port to admin CQ for async events */ 1745 rc = fun_bind(fdev, FUN_ADMIN_BIND_TYPE_PORT, portid, 1746 FUN_ADMIN_BIND_TYPE_EPCQ, 0); 1747 if (rc) 1748 goto destroy_port; 1749 1750 rc = fun_get_port_attributes(netdev); 
1751 if (rc) 1752 goto destroy_port; 1753 1754 rc = fun_init_rss(netdev); 1755 if (rc) 1756 goto destroy_port; 1757 1758 rc = fun_init_stats_area(fp); 1759 if (rc) 1760 goto free_rss; 1761 1762 SET_NETDEV_DEV(netdev, fdev->dev); 1763 netdev->netdev_ops = &fun_netdev_ops; 1764 1765 netdev->hw_features = NETIF_F_SG | NETIF_F_RXHASH | NETIF_F_RXCSUM; 1766 if (fp->port_caps & FUN_PORT_CAP_OFFLOADS) 1767 netdev->hw_features |= NETIF_F_HW_CSUM | TSO_FLAGS; 1768 if (fp->port_caps & FUN_PORT_CAP_ENCAP_OFFLOADS) 1769 netdev->hw_features |= GSO_ENCAP_FLAGS; 1770 1771 netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA; 1772 netdev->vlan_features = netdev->features & VLAN_FEAT; 1773 netdev->mpls_features = netdev->vlan_features; 1774 netdev->hw_enc_features = netdev->hw_features; 1775 1776 netdev->min_mtu = ETH_MIN_MTU; 1777 netdev->max_mtu = FUN_MAX_MTU; 1778 1779 fun_set_ethtool_ops(netdev); 1780 1781 /* configurable parameters */ 1782 fp->sq_depth = min(SQ_DEPTH, fdev->q_depth); 1783 fp->cq_depth = min(CQ_DEPTH, fdev->q_depth); 1784 fp->rq_depth = min_t(unsigned int, RQ_DEPTH, fdev->q_depth); 1785 fp->rx_coal_usec = CQ_INTCOAL_USEC; 1786 fp->rx_coal_count = CQ_INTCOAL_NPKT; 1787 fp->tx_coal_usec = SQ_INTCOAL_USEC; 1788 fp->tx_coal_count = SQ_INTCOAL_NPKT; 1789 fp->cq_irq_db = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count); 1790 1791 rc = fun_dl_port_register(netdev); 1792 if (rc) 1793 goto free_stats; 1794 1795 fp->ktls_id = FUN_HCI_ID_INVALID; 1796 fun_ktls_init(netdev); /* optional, failure OK */ 1797 1798 netif_carrier_off(netdev); 1799 ed->netdevs[portid] = netdev; 1800 rc = register_netdev(netdev); 1801 if (rc) 1802 goto unreg_devlink; 1803 1804 devlink_port_type_eth_set(&fp->dl_port, netdev); 1805 1806 return 0; 1807 1808 unreg_devlink: 1809 ed->netdevs[portid] = NULL; 1810 fun_ktls_cleanup(fp); 1811 devlink_port_unregister(&fp->dl_port); 1812 free_stats: 1813 fun_free_stats_area(fp); 1814 free_rss: 1815 fun_free_rss(fp); 1816 destroy_port: 1817 fun_port_destroy(netdev); 1818 free_netdev: 1819 free_netdev(netdev); 1820 done: 1821 dev_err(fdev->dev, "couldn't allocate port %u, error %d", portid, rc); 1822 return rc; 1823 } 1824 1825 static void fun_destroy_netdev(struct net_device *netdev) 1826 { 1827 struct funeth_priv *fp; 1828 1829 fp = netdev_priv(netdev); 1830 devlink_port_type_clear(&fp->dl_port); 1831 unregister_netdev(netdev); 1832 devlink_port_unregister(&fp->dl_port); 1833 fun_ktls_cleanup(fp); 1834 fun_free_stats_area(fp); 1835 fun_free_rss(fp); 1836 fun_port_destroy(netdev); 1837 free_netdev(netdev); 1838 } 1839 1840 static int fun_create_ports(struct fun_ethdev *ed, unsigned int nports) 1841 { 1842 struct fun_dev *fd = &ed->fdev; 1843 int i, rc; 1844 1845 /* The admin queue takes 1 IRQ and 2 SQs. 
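 * The smaller of the remaining IRQ and queue ID counts is divided evenly among
 * the ports, e.g., with 17 device IRQs, a kern_end_qid of 34, and 2 ports:
 * min(17 - 1, 34 - 2) / 2 = 8 SQs per port (illustrative values).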
*/ 1846 ed->nsqs_per_port = min(fd->num_irqs - 1, 1847 fd->kern_end_qid - 2) / nports; 1848 if (ed->nsqs_per_port < 2) { 1849 dev_err(fd->dev, "Too few SQs for %u ports", nports); 1850 return -EINVAL; 1851 } 1852 1853 ed->netdevs = kcalloc(nports, sizeof(*ed->netdevs), GFP_KERNEL); 1854 if (!ed->netdevs) 1855 return -ENOMEM; 1856 1857 ed->num_ports = nports; 1858 for (i = 0; i < nports; i++) { 1859 rc = fun_create_netdev(ed, i); 1860 if (rc) 1861 goto free_netdevs; 1862 } 1863 1864 return 0; 1865 1866 free_netdevs: 1867 while (i) 1868 fun_destroy_netdev(ed->netdevs[--i]); 1869 kfree(ed->netdevs); 1870 ed->netdevs = NULL; 1871 ed->num_ports = 0; 1872 return rc; 1873 } 1874 1875 static void fun_destroy_ports(struct fun_ethdev *ed) 1876 { 1877 unsigned int i; 1878 1879 for (i = 0; i < ed->num_ports; i++) 1880 fun_destroy_netdev(ed->netdevs[i]); 1881 1882 kfree(ed->netdevs); 1883 ed->netdevs = NULL; 1884 ed->num_ports = 0; 1885 } 1886 1887 static void fun_update_link_state(const struct fun_ethdev *ed, 1888 const struct fun_admin_port_notif *notif) 1889 { 1890 unsigned int port_idx = be16_to_cpu(notif->id); 1891 struct net_device *netdev; 1892 struct funeth_priv *fp; 1893 1894 if (port_idx >= ed->num_ports) 1895 return; 1896 1897 netdev = ed->netdevs[port_idx]; 1898 fp = netdev_priv(netdev); 1899 1900 write_seqcount_begin(&fp->link_seq); 1901 fp->link_speed = be32_to_cpu(notif->speed) * 10; /* 10 Mbps->Mbps */ 1902 fp->active_fc = notif->flow_ctrl; 1903 fp->active_fec = notif->fec; 1904 fp->xcvr_type = notif->xcvr_type; 1905 fp->link_down_reason = notif->link_down_reason; 1906 fp->lp_advertising = be64_to_cpu(notif->lp_advertising); 1907 1908 if ((notif->link_state | notif->missed_events) & FUN_PORT_FLAG_MAC_DOWN) 1909 netif_carrier_off(netdev); 1910 if (notif->link_state & FUN_PORT_FLAG_MAC_UP) 1911 netif_carrier_on(netdev); 1912 1913 write_seqcount_end(&fp->link_seq); 1914 fun_report_link(netdev); 1915 } 1916 1917 /* handler for async events delivered through the admin CQ */ 1918 static void fun_event_cb(struct fun_dev *fdev, void *entry) 1919 { 1920 u8 op = ((struct fun_admin_rsp_common *)entry)->op; 1921 1922 if (op == FUN_ADMIN_OP_PORT) { 1923 const struct fun_admin_port_notif *rsp = entry; 1924 1925 if (rsp->subop == FUN_ADMIN_SUBOP_NOTIFY) { 1926 fun_update_link_state(to_fun_ethdev(fdev), rsp); 1927 } else if (rsp->subop == FUN_ADMIN_SUBOP_RES_COUNT) { 1928 const struct fun_admin_res_count_rsp *r = entry; 1929 1930 if (r->count.data) 1931 set_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags); 1932 else 1933 set_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags); 1934 fun_serv_sched(fdev); 1935 } else { 1936 dev_info(fdev->dev, "adminq event unexpected op %u subop %u", 1937 op, rsp->subop); 1938 } 1939 } else { 1940 dev_info(fdev->dev, "adminq event unexpected op %u", op); 1941 } 1942 } 1943 1944 /* handler for pending work managed by the service task */ 1945 static void fun_service_cb(struct fun_dev *fdev) 1946 { 1947 struct fun_ethdev *ed = to_fun_ethdev(fdev); 1948 int rc; 1949 1950 if (test_and_clear_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags)) 1951 fun_destroy_ports(ed); 1952 1953 if (!test_and_clear_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags)) 1954 return; 1955 1956 rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT); 1957 if (rc < 0 || rc == ed->num_ports) 1958 return; 1959 1960 if (ed->num_ports) 1961 fun_destroy_ports(ed); 1962 if (rc) 1963 fun_create_ports(ed, rc); 1964 } 1965 1966 static int funeth_sriov_configure(struct pci_dev *pdev, int nvfs) 1967 { 1968 struct fun_dev *fdev = 
pci_get_drvdata(pdev); 1969 struct fun_ethdev *ed = to_fun_ethdev(fdev); 1970 int rc; 1971 1972 if (nvfs == 0) { 1973 if (pci_vfs_assigned(pdev)) { 1974 dev_warn(&pdev->dev, 1975 "Cannot disable SR-IOV while VFs are assigned\n"); 1976 return -EPERM; 1977 } 1978 1979 mutex_lock(&ed->state_mutex); 1980 fun_free_vports(ed); 1981 mutex_unlock(&ed->state_mutex); 1982 pci_disable_sriov(pdev); 1983 return 0; 1984 } 1985 1986 rc = pci_enable_sriov(pdev, nvfs); 1987 if (rc) 1988 return rc; 1989 1990 mutex_lock(&ed->state_mutex); 1991 rc = fun_init_vports(ed, nvfs); 1992 mutex_unlock(&ed->state_mutex); 1993 if (rc) { 1994 pci_disable_sriov(pdev); 1995 return rc; 1996 } 1997 1998 return nvfs; 1999 } 2000 2001 static int funeth_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2002 { 2003 struct fun_dev_params aqreq = { 2004 .cqe_size_log2 = ilog2(ADMIN_CQE_SIZE), 2005 .sqe_size_log2 = ilog2(ADMIN_SQE_SIZE), 2006 .cq_depth = ADMIN_CQ_DEPTH, 2007 .sq_depth = ADMIN_SQ_DEPTH, 2008 .rq_depth = ADMIN_RQ_DEPTH, 2009 .min_msix = 2, /* 1 Rx + 1 Tx */ 2010 .event_cb = fun_event_cb, 2011 .serv_cb = fun_service_cb, 2012 }; 2013 struct devlink *devlink; 2014 struct fun_ethdev *ed; 2015 struct fun_dev *fdev; 2016 int rc; 2017 2018 devlink = fun_devlink_alloc(&pdev->dev); 2019 if (!devlink) { 2020 dev_err(&pdev->dev, "devlink alloc failed\n"); 2021 return -ENOMEM; 2022 } 2023 2024 ed = devlink_priv(devlink); 2025 mutex_init(&ed->state_mutex); 2026 2027 fdev = &ed->fdev; 2028 rc = fun_dev_enable(fdev, pdev, &aqreq, KBUILD_MODNAME); 2029 if (rc) 2030 goto free_devlink; 2031 2032 rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT); 2033 if (rc > 0) 2034 rc = fun_create_ports(ed, rc); 2035 if (rc < 0) 2036 goto disable_dev; 2037 2038 fun_serv_restart(fdev); 2039 fun_devlink_register(devlink); 2040 return 0; 2041 2042 disable_dev: 2043 fun_dev_disable(fdev); 2044 free_devlink: 2045 mutex_destroy(&ed->state_mutex); 2046 fun_devlink_free(devlink); 2047 return rc; 2048 } 2049 2050 static void funeth_remove(struct pci_dev *pdev) 2051 { 2052 struct fun_dev *fdev = pci_get_drvdata(pdev); 2053 struct devlink *devlink; 2054 struct fun_ethdev *ed; 2055 2056 ed = to_fun_ethdev(fdev); 2057 devlink = priv_to_devlink(ed); 2058 fun_devlink_unregister(devlink); 2059 2060 #ifdef CONFIG_PCI_IOV 2061 funeth_sriov_configure(pdev, 0); 2062 #endif 2063 2064 fun_serv_stop(fdev); 2065 fun_destroy_ports(ed); 2066 fun_dev_disable(fdev); 2067 mutex_destroy(&ed->state_mutex); 2068 2069 fun_devlink_free(devlink); 2070 } 2071 2072 static struct pci_driver funeth_driver = { 2073 .name = KBUILD_MODNAME, 2074 .id_table = funeth_id_table, 2075 .probe = funeth_probe, 2076 .remove = funeth_remove, 2077 .shutdown = funeth_remove, 2078 .sriov_configure = funeth_sriov_configure, 2079 }; 2080 2081 module_pci_driver(funeth_driver); 2082 2083 MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>"); 2084 MODULE_DESCRIPTION("Fungible Ethernet Network Driver"); 2085 MODULE_LICENSE("Dual BSD/GPL"); 2086 MODULE_DEVICE_TABLE(pci, funeth_id_table); 2087