// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

/* queuetype support level */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,	/* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,	/* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 0,	/* 0 = Base version with CQ+SG support */
	[IONIC_QTYPE_TXQ]     = 1,	/* 0 = Base version with CQ+SG support
					 * 1 =   ... with Tx SG version 1
					 */
};

static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
static void ionic_link_status_check(struct ionic_lif *lif);
static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);

static void ionic_txrx_deinit(struct ionic_lif *lif);
static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);

static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	spin_lock_bh(&def->lock);
	if (!list_empty(&def->list)) {
		w = list_first_entry(&def->list,
				     struct ionic_deferred_work, list);
		list_del(&w->list);
	}
	spin_unlock_bh(&def->lock);

	if (w) {
		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif, w->rx_mode);
			break;
		case IONIC_DW_TYPE_RX_ADDR_ADD:
			ionic_lif_addr_add(lif, w->addr);
			break;
		case IONIC_DW_TYPE_RX_ADDR_DEL:
			ionic_lif_addr_del(lif, w->addr);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			if (w->fw_status)
				ionic_lif_handle_fw_up(lif);
			else
				ionic_lif_handle_fw_down(lif);
			break;
		default:
			break;
		}
		kfree(w);
		schedule_work(&def->work);
	}
}

void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
				struct ionic_deferred_work *work)
{
	spin_lock_bh(&def->lock);
	list_add_tail(&work->list, &def->list);
	spin_unlock_bh(&def->lock);
	schedule_work(&def->work);
}
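
/* Note: the deferred work handler above pops and services just one item
 * per pass and then reschedules itself, so the spinlock is held only for
 * the list manipulation and never across the handler calls.
 */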

static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			u32 link_speed;

			ionic_port_identify(lif->ionic);
			link_speed = le32_to_cpu(lif->info->status.link_speed);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    link_speed / 1000);
			netif_carrier_on(netdev);
		}

		if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_start_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}

void ionic_link_status_check_request(struct ionic_lif *lif)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (in_interrupt()) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return;

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_link_status_check(lif);
	}
}

static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = lif->netdev->name;
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs) {
		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
			    __func__, index, ionic->nintrs);
		return -ENOSPC;
	}

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	return 0;
}

static void ionic_intr_free(struct ionic *ionic, int index)
{
	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
		clear_bit(index, ionic->intrs);
}

static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		napi_enable(&qcq->napi);
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}
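
/* ionic_qcq_disable() below mirrors the enable path in reverse: the
 * interrupt is masked and synchronized and napi is stopped before the
 * Q_CONTROL/DISABLE command is posted, so no completion processing can
 * race with the teardown.
 */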

static int ionic_qcq_disable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_DISABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	if (!qcq)
		return;

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
		return;

	irq_set_affinity_hint(qcq->intr.vector, NULL);
	devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
	qcq->intr.vector = 0;
	ionic_intr_free(lif->ionic, qcq->intr.index);
	qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}

static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (qcq->q_base) {
		dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
		qcq->q_base = NULL;
		qcq->q_base_pa = 0;
	}

	if (qcq->cq_base) {
		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
		qcq->cq_base = NULL;
		qcq->cq_base_pa = 0;
	}

	if (qcq->sg_base) {
		dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
		qcq->sg_base = NULL;
		qcq->sg_base_pa = 0;
	}

	ionic_qcq_intr_free(lif, qcq);

	if (qcq->cq.info) {
		devm_kfree(dev, qcq->cq.info);
		qcq->cq.info = NULL;
	}
	if (qcq->q.info) {
		devm_kfree(dev, qcq->q.info);
		qcq->q.info = NULL;
	}
}

static void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		devm_kfree(dev, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		ionic_qcq_free(lif, lif->adminqcq);
		devm_kfree(dev, lif->adminqcq);
		lif->adminqcq = NULL;
	}

	if (lif->rxqcqs) {
		devm_kfree(dev, lif->rxqstats);
		lif->rxqstats = NULL;
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		devm_kfree(dev, lif->txqstats);
		lif->txqstats = NULL;
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}

static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
		ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
	}

	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
}

static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	int err;

	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	err = ionic_intr_alloc(lif, &qcq->intr);
	if (err) {
		netdev_warn(lif->netdev, "no intr for %s: %d\n",
			    qcq->q.name, err);
		goto err_out;
	}

	err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
	if (err < 0) {
		netdev_warn(lif->netdev, "no vector for %s: %d\n",
			    qcq->q.name, err);
		goto err_out_free_intr;
	}
	qcq->intr.vector = err;
	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
			       IONIC_INTR_MASK_SET);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "irq request failed %d\n", err);
		goto err_out_free_intr;
	}

	/* try to get the irq on the local numa node first */
	qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
					     dev_to_node(lif->ionic->dev));
	if (qcq->intr.cpu != -1)
		cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);

	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
	return 0;

err_out_free_intr:
	ionic_intr_free(lif->ionic, qcq->intr.index);
err_out:
	return err;
}

static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int pid, struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	void *q_base, *cq_base, *sg_base;
	dma_addr_t cq_base_pa = 0;
	dma_addr_t sg_base_pa = 0;
	dma_addr_t q_base_pa = 0;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->flags = flags;

	new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
				   GFP_KERNEL);
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out_free_qcq;
	}

	new->q.type = type;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out_free_q_info;
	}

	err = ionic_alloc_qcq_interrupt(lif, new);
	if (err)
		goto err_out;

	new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
				    GFP_KERNEL);
	if (!new->cq.info) {
		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_cq_info;
	}
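
	/* Each ring buffer is over-allocated by one page so that the
	 * descriptor area can be pushed up to a page boundary with
	 * PTR_ALIGN()/ALIGN() below; only the aligned addresses are
	 * handed to the device.
	 */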
	new->q_size = PAGE_SIZE + (num_descs * desc_size);
	new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
					 GFP_KERNEL);
	if (!new->q_base) {
		netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
		err = -ENOMEM;
		goto err_out_free_cq_info;
	}
	q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
	q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
	ionic_q_map(&new->q, q_base, q_base_pa);

	new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
	new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
					  GFP_KERNEL);
	if (!new->cq_base) {
		netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
		err = -ENOMEM;
		goto err_out_free_q;
	}
	cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
	cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
	ionic_cq_bind(&new->cq, &new->q);

	if (flags & IONIC_QCQ_F_SG) {
		new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
		new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
						  GFP_KERNEL);
		if (!new->sg_base) {
			netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq;
		}
		sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
		sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	*qcq = new;

	return 0;

err_out_free_cq:
	dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
	dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
	devm_kfree(dev, new->cq.info);
err_out_free_irq:
	if (flags & IONIC_QCQ_F_INTR) {
		devm_free_irq(dev, new->intr.vector, &new->napi);
		ionic_intr_free(lif->ionic, new->intr.index);
	}
err_out_free_q_info:
	devm_kfree(dev, new->q.info);
err_out_free_qcq:
	devm_kfree(dev, new);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int flags;
	int err;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0, lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0, lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	err = -ENOMEM;
	lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				   sizeof(struct ionic_qcq *), GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out;
	lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				   sizeof(struct ionic_qcq *), GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out;

	lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				     sizeof(struct ionic_tx_stats), GFP_KERNEL);
	if (!lif->txqstats)
		goto err_out;
	lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				     sizeof(struct ionic_rx_stats), GFP_KERNEL);
	if (!lif->rxqstats)
		goto err_out;

	return 0;

err_out:
	ionic_qcqs_free(lif);
	return err;
}
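
/* ionic_qcq_sanitize() resets the producer/consumer indices and zeroes
 * the ring memory so a qcq being (re)initialized starts clean; done_color
 * starts at 1 to match the color the device writes on its first pass
 * through the completion ring.
 */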

static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;
	memset(qcq->q_base, 0, qcq->q_size);
	memset(qcq->cq_base, 0, qcq->cq_size);
	memset(qcq->sg_base, 0, qcq->sg_size);
}

static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
		},
	};
	unsigned int intr_index;
	int err;

	if (qcq->flags & IONIC_QCQ_F_INTR)
		intr_index = qcq->intr.index;
	else
		intr_index = lif->rxqcqs[q->index]->intr.index;
	ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
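
/* The rxq init below mirrors the txq init above; only the napi hookup
 * differs: with split interrupts each rxq gets its own ionic_rx_napi,
 * otherwise the rxq's napi services both queues via ionic_txrx_napi.
 */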

static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
		},
	};
	int err;

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
			       NAPI_POLL_WEIGHT);
	else
		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static bool ionic_notifyq_service(struct ionic_cq *cq,
				  struct ionic_cq_info *cq_info)
{
	union ionic_notifyq_comp *comp = cq_info->cq_desc;
	struct ionic_deferred_work *work;
	struct net_device *netdev;
	struct ionic_queue *q;
	struct ionic_lif *lif;
	u64 eid;

	q = cq->bound_q;
	lif = q->info[0].cb_arg;
	netdev = lif->netdev;
	eid = le64_to_cpu(comp->event.eid);

	/* Have we run out of new completions to process? */
	if ((s64)(eid - lif->last_eid) <= 0)
		return false;

	lif->last_eid = eid;

	dev_dbg(lif->ionic->dev, "notifyq event:\n");
	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	switch (le16_to_cpu(comp->event.ecode)) {
	case IONIC_EVENT_LINK_CHANGE:
		ionic_link_status_check_request(lif);
		break;
	case IONIC_EVENT_RESET:
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
		} else {
			work->type = IONIC_DW_TYPE_LIF_RESET;
			ionic_lif_deferred_enqueue(&lif->deferred, work);
		}
		break;
	default:
		netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
			    comp->event.ecode, eid);
		break;
	}

	return true;
}

static bool ionic_adminq_service(struct ionic_cq *cq,
				 struct ionic_cq_info *cq_info)
{
	struct ionic_admin_comp *comp = cq_info->cq_desc;

	if (!color_match(comp->color, cq->done_color))
		return false;

	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));

	return true;
}

static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	struct ionic_dev *idev = &lif->ionic->idev;
	unsigned int flags = 0;
	int n_work = 0;
	int a_work = 0;
	int work_done;

	if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
		n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
					  ionic_notifyq_service, NULL, NULL);

	if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
		a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
					  ionic_adminq_service, NULL, NULL);

	work_done = max(n_work, a_work);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		DEBUG_STATS_INTR_REARM(intr);
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   intr->index,
				   n_work + a_work, flags);
	}

	return work_done;
}
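
/* Since the notifyq rides on the adminq interrupt, the adminq napi above
 * polls both queues: the two work counts go to napi_complete_done() as a
 * max for budget accounting, but are returned to the device as a summed
 * credit count.
 */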

void ionic_get_stats64(struct net_device *netdev,
		       struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}

static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	/* don't bother if we already have it */
	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	spin_unlock_bh(&lif->rx_filters.lock);
	if (f)
		return 0;

	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);

	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
		   addr, f->filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	return 0;
}
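
/* ionic_lif_addr() does the filter accounting up front so an overflow
 * can be reported to the stack immediately, then either applies the
 * change directly or, from atomic context, defers it to the work thread,
 * since posting to the adminq sleeps waiting for the completion.
 */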

static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
{
	struct ionic *ionic = lif->ionic;
	struct ionic_deferred_work *work;
	unsigned int nmfilters;
	unsigned int nufilters;

	if (add) {
		/* Do we have space for this filter?  We test the counters
		 * here before checking the need for deferral so that we
		 * can return an overflow error to the stack.
		 */
		nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
		nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);

		if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
			lif->nmcast++;
		else if (!is_multicast_ether_addr(addr) &&
			 lif->nucast < nufilters)
			lif->nucast++;
		else
			return -ENOSPC;
	} else {
		if (is_multicast_ether_addr(addr) && lif->nmcast)
			lif->nmcast--;
		else if (!is_multicast_ether_addr(addr) && lif->nucast)
			lif->nucast--;
	}

	if (in_interrupt()) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
			return -ENOMEM;
		}
		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
				   IONIC_DW_TYPE_RX_ADDR_DEL;
		memcpy(work->addr, addr, ETH_ALEN);
		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		if (add)
			return ionic_lif_addr_add(lif, addr);
		else
			return ionic_lif_addr_del(lif, addr);
	}

	return 0;
}

static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, true);
}

static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, false);
}

static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_mode_set = {
			.opcode = IONIC_CMD_RX_MODE_SET,
			.lif_index = cpu_to_le16(lif->index),
			.rx_mode = cpu_to_le16(rx_mode),
		},
	};
	char buf[128];
	int err;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
			    rx_mode, err);
	else
		lif->rx_mode = rx_mode;
}
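
/* Same deferral pattern as the address filters: from atomic context the
 * rx_mode change is handed off to the deferred work thread because
 * setting it means sleeping on an adminq completion.
 */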

static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
{
	struct ionic_deferred_work *work;

	if (in_interrupt()) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
			return;
		}
		work->type = IONIC_DW_TYPE_RX_MODE;
		work->rx_mode = rx_mode;
		netdev_dbg(lif->netdev, "deferred: rx_mode\n");
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_lif_rx_mode(lif, rx_mode);
	}
}

static void ionic_set_rx_mode(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_identity *ident;
	unsigned int nfilters;
	unsigned int rx_mode;

	ident = &lif->ionic->ident;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync unicast addresses
	 * next check to see if we're in an overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
	if (netdev_uc_count(netdev) + 1 > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		lif->uc_overflow = true;
	} else if (lif->uc_overflow) {
		lif->uc_overflow = false;
		if (!(netdev->flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
	}

	/* same for multicast */
	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
	nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
	if (netdev_mc_count(netdev) > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
		lif->mc_overflow = true;
	} else if (lif->mc_overflow) {
		lif->mc_overflow = false;
		if (!(netdev->flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	if (lif->rx_mode != rx_mode)
		_ionic_lif_rx_mode(lif, rx_mode);
}

static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}
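
/* Feature negotiation: the LIF_SETATTR command below carries the wanted
 * feature bits, the completion carries what the device can actually do,
 * and the driver caches the intersection in lif->hw_features.
 */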

static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & features) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");

	return 0;
}

static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_RXHASH |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_addr_del(netdev, netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_addr_add(netdev, mac);
}
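
/* Note that stop/start_queues_reconfig split a lock between them: the
 * stop side takes queue_lock and leaves it held, and the matching start
 * side releases it, so everything between the two calls is serialized.
 */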

static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
	/* Stop and clean the queues before reconfiguration */
	mutex_lock(&lif->queue_lock);
	netif_device_detach(lif->netdev);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
}

static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
	int err;

	/* Re-init the queues after reconfiguration */

	/* The only way txrx_init can fail here is if communication
	 * with FW is suddenly broken.  There's not much we can do
	 * at this point - error messages have already been printed,
	 * so we can continue on and the user can eventually do a
	 * DOWN and UP to try to reset and clear the issue.
	 */
	err = ionic_txrx_init(lif);
	mutex_unlock(&lif->queue_lock);
	ionic_link_status_check_request(lif);
	netif_device_attach(lif->netdev);

	return err;
}

static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	netdev->mtu = new_mtu;
	/* if we're not running, nothing more to do */
	if (!netif_running(netdev))
		return 0;

	ionic_stop_queues_reconfig(lif);
	return ionic_start_queues_reconfig(lif);
}

static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);

	netdev_info(lif->netdev, "Tx Timeout recovery\n");

	/* if we were stopped before this scheduled job was launched,
	 * don't bother the queues as they are already stopped.
	 */
	if (!netif_running(lif->netdev))
		return;

	ionic_stop_queues_reconfig(lif);
	ionic_start_queues_reconfig(lif);
}

static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	schedule_work(&lif->tx_timeout_work);
}

static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
			.vlan.vlan = cpu_to_le16(vid),
		},
	};
	int err;

	netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_vlan(lif, vid);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
		   vid, f->filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	return ionic_adminq_post_wait(lif, &ctx);
}
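
/* RSS config: the key and indirection table arguments are optional, and
 * passing NULL leaves the lif's cached copy untouched; the full cached
 * state is pushed to the device on every call.
 */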

int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_lif_rss_init(struct ionic_lif *lif)
{
	unsigned int tbl_sz;
	unsigned int i;

	lif->rss_types = IONIC_RSS_TYPE_IPV4 |
			 IONIC_RSS_TYPE_IPV4_TCP |
			 IONIC_RSS_TYPE_IPV4_UDP |
			 IONIC_RSS_TYPE_IPV6 |
			 IONIC_RSS_TYPE_IPV6_TCP |
			 IONIC_RSS_TYPE_IPV6_UDP;

	/* Fill indirection table with 'default' values */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
}

static void ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	int tbl_sz;

	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	memset(lif->rss_ind_tbl, 0, tbl_sz);
	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}

static void ionic_txrx_disable(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++) {
			err = ionic_qcq_disable(lif->txqcqs[i]);
			if (err == -ETIMEDOUT)
				break;
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++) {
			err = ionic_qcq_disable(lif->rxqcqs[i]);
			if (err == -ETIMEDOUT)
				break;
		}
	}
}

static void ionic_txrx_deinit(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			ionic_tx_flush(&lif->txqcqs[i]->cq);
			ionic_tx_empty(&lif->txqcqs[i]->q);
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
			ionic_rx_flush(&lif->rxqcqs[i]->cq);
			ionic_rx_empty(&lif->rxqcqs[i]->q);
		}
	}
	lif->rx_mode = 0;
}

static void ionic_txrx_free(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->txqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
			lif->txqcqs[i] = NULL;
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->rxqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
			lif->rxqcqs[i] = NULL;
		}
	}
}
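
/* The tx/rx queue pairs move through a simple lifecycle - alloc, init,
 * enable - and are torn down in reverse order - disable, deinit, free.
 * ionic_txrx_alloc() below is the first step in that chain.
 */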

static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int sg_desc_sz;
	unsigned int flags;
	unsigned int i;
	int err = 0;

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		flags |= IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      lif->ntxq_descs,
				      sizeof(struct ionic_txq_desc),
				      sizeof(struct ionic_txq_comp),
				      sg_desc_sz,
				      lif->kern_pid, &lif->txqcqs[i]);
		if (err)
			goto err_out;

		if (flags & IONIC_QCQ_F_INTR)
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->txqcqs[i]->intr.index,
					     lif->tx_coalesce_hw);

		ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
	}

	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
				      lif->nrxq_descs,
				      sizeof(struct ionic_rxq_desc),
				      sizeof(struct ionic_rxq_comp),
				      sizeof(struct ionic_rxq_sg_desc),
				      lif->kern_pid, &lif->rxqcqs[i]);
		if (err)
			goto err_out;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[i]->intr.index,
				     lif->rx_coalesce_hw);

		if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
			ionic_link_qcq_interrupts(lif->rxqcqs[i],
						  lif->txqcqs[i]);

		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
	}

	return 0;

err_out:
	ionic_txrx_free(lif);

	return err;
}

static int ionic_txrx_init(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
		if (err) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			goto err_out;
		}
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_init(lif);

	ionic_set_rx_mode(lif->netdev);

	return 0;

err_out:
	while (i--) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
	}

	return err;
}

static int ionic_txrx_enable(struct ionic_lif *lif)
{
	int i, err;

	for (i = 0; i < lif->nxqs; i++) {
		ionic_rx_fill(&lif->rxqcqs[i]->q);
		err = ionic_qcq_enable(lif->rxqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_qcq_enable(lif->txqcqs[i]);
		if (err) {
			if (err != -ETIMEDOUT)
				ionic_qcq_disable(lif->rxqcqs[i]);
			goto err_out;
		}
	}

	return 0;

err_out:
	while (i--) {
		err = ionic_qcq_disable(lif->txqcqs[i]);
		if (err == -ETIMEDOUT)
			break;
		err = ionic_qcq_disable(lif->rxqcqs[i]);
		if (err == -ETIMEDOUT)
			break;
	}

	return err;
}

static int ionic_start_queues(struct ionic_lif *lif)
{
	int err;

	if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
		return 0;

	err = ionic_txrx_enable(lif);
	if (err) {
		clear_bit(IONIC_LIF_F_UP, lif->state);
		return err;
	}
	netif_tx_wake_all_queues(lif->netdev);

	return 0;
}
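
/* ndo_open allocates and inits the queues but, per the comment below,
 * only starts them once there is link; ionic_start_queues() stays
 * idempotent via the test_and_set_bit() on IONIC_LIF_F_UP above.
 */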

static int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_txrx_alloc(lif);
	if (err)
		return err;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_out;

	err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	/* don't start the queues until we have link */
	if (netif_carrier_ok(netdev)) {
		err = ionic_start_queues(lif);
		if (err)
			goto err_txrx_deinit;
	}

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_out:
	ionic_txrx_free(lif);
	return err;
}

static void ionic_stop_queues(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
		return;

	netif_tx_disable(lif->netdev);
	ionic_txrx_disable(lif);
}

static int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return 0;

	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);

	return 0;
}

static int ionic_get_vf_config(struct net_device *netdev,
			       int vf, struct ifla_vf_info *ivf)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ivf->vf = vf;
		ivf->vlan = ionic->vfs[vf].vlanid;
		ivf->qos = 0;
		ivf->spoofchk = ionic->vfs[vf].spoofchk;
		ivf->linkstate = ionic->vfs[vf].linkstate;
		ivf->max_tx_rate = ionic->vfs[vf].maxrate;
		ivf->trusted = ionic->vfs[vf].trusted;
		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}

static int ionic_get_vf_stats(struct net_device *netdev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	struct ionic_lif_stats *vs;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		memset(vf_stats, 0, sizeof(*vf_stats));
		vs = &ionic->vfs[vf].stats;

		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
		vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes);
		vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes);
		vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets);
		vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets);
		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
				       le64_to_cpu(vs->rx_mcast_drop_packets) +
				       le64_to_cpu(vs->rx_bcast_drop_packets);
		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
				       le64_to_cpu(vs->tx_mcast_drop_packets) +
				       le64_to_cpu(vs->tx_bcast_drop_packets);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}
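
/* The VF get/set handlers share a pattern: -EBUSY if the device has been
 * detached (e.g. during a fw reset), vf_op_lock taken for read on queries
 * and for write on changes, and the local vfs[] cache updated only after
 * the device accepts the new config.
 */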

static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
		if (!ret)
			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
			     u8 qos, __be16 proto)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* until someday when we support qos */
	if (qos)
		return -EINVAL;

	if (vlan > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
		if (!ret)
			ionic->vfs[vf].vlanid = vlan;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_rate(struct net_device *netdev, int vf,
			     int tx_min, int tx_max)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* setting the min just seems silly */
	if (tx_min)
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
		if (!ret)
			lif->ionic->vfs[vf].maxrate = tx_max;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data = set;	/* convert to u8 for config */
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_SPOOFCHK, &data);
		if (!ret)
			ionic->vfs[vf].spoofchk = data;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data = set;	/* convert to u8 for config */
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_TRUST, &data);
		if (!ret)
			ionic->vfs[vf].trusted = data;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data;
	int ret;

	switch (set) {
static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data;
	int ret;

	switch (set) {
	case IFLA_VF_LINK_STATE_ENABLE:
		data = IONIC_VF_LINK_STATUS_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		data = IONIC_VF_LINK_STATUS_DOWN;
		break;
	case IFLA_VF_LINK_STATE_AUTO:
		data = IONIC_VF_LINK_STATUS_AUTO;
		break;
	default:
		return -EINVAL;
	}

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_LINKSTATE, &data);
		if (!ret)
			ionic->vfs[vf].linkstate = set;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static const struct net_device_ops ionic_netdev_ops = {
	.ndo_open = ionic_open,
	.ndo_stop = ionic_stop,
	.ndo_start_xmit = ionic_start_xmit,
	.ndo_get_stats64 = ionic_get_stats64,
	.ndo_set_rx_mode = ionic_set_rx_mode,
	.ndo_set_features = ionic_set_features,
	.ndo_set_mac_address = ionic_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = ionic_tx_timeout,
	.ndo_change_mtu = ionic_change_mtu,
	.ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid,
	.ndo_set_vf_vlan = ionic_set_vf_vlan,
	.ndo_set_vf_trust = ionic_set_vf_trust,
	.ndo_set_vf_mac = ionic_set_vf_mac,
	.ndo_set_vf_rate = ionic_set_vf_rate,
	.ndo_set_vf_spoofchk = ionic_set_vf_spoofchk,
	.ndo_get_vf_config = ionic_get_vf_config,
	.ndo_set_vf_link_state = ionic_set_vf_link_state,
	.ndo_get_vf_stats = ionic_get_vf_stats,
};

static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
	/* only swapping the queues, not the napi, flags, or other stuff */
	swap(a->q.num_descs, b->q.num_descs);
	swap(a->q.base, b->q.base);
	swap(a->q.base_pa, b->q.base_pa);
	swap(a->q.info, b->q.info);
	swap(a->q_base, b->q_base);
	swap(a->q_base_pa, b->q_base_pa);
	swap(a->q_size, b->q_size);

	swap(a->q.sg_base, b->q.sg_base);
	swap(a->q.sg_base_pa, b->q.sg_base_pa);
	swap(a->sg_base, b->sg_base);
	swap(a->sg_base_pa, b->sg_base_pa);
	swap(a->sg_size, b->sg_size);

	swap(a->cq.num_descs, b->cq.num_descs);
	swap(a->cq.base, b->cq.base);
	swap(a->cq.base_pa, b->cq.base_pa);
	swap(a->cq.info, b->cq.info);
	swap(a->cq_base, b->cq_base);
	swap(a->cq_base_pa, b->cq_base_pa);
	swap(a->cq_size, b->cq_size);
}

int ionic_reconfigure_queues(struct ionic_lif *lif,
			     struct ionic_queue_params *qparam)
{
	struct ionic_qcq **tx_qcqs = NULL;
	struct ionic_qcq **rx_qcqs = NULL;
	unsigned int sg_desc_sz;
	unsigned int flags;
	int err = -ENOMEM;
	unsigned int i;

	/* allocate temporary qcq arrays to hold new queue structs */
	if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
		tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!tx_qcqs)
			goto err_out;
	}
	if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs) {
		rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!rx_qcqs)
			goto err_out;
	}

	/* allocate new desc_info and rings, but leave the interrupt setup
	 * until later so as to not mess with the still-running queues
	 */
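	/* Use the v1 TX SG descriptor size only when the firmware reports
	 * TXQ support at version 1 or later and advertises the matching
	 * sg_desc_sz; otherwise stay with the base descriptor layout.
	 */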
	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	if (tx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
					      qparam->ntxq_descs,
					      sizeof(struct ionic_txq_desc),
					      sizeof(struct ionic_txq_comp),
					      sg_desc_sz,
					      lif->kern_pid, &tx_qcqs[i]);
			if (err)
				goto err_out;
		}
	}

	if (rx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
					      qparam->nrxq_descs,
					      sizeof(struct ionic_rxq_desc),
					      sizeof(struct ionic_rxq_comp),
					      sizeof(struct ionic_rxq_sg_desc),
					      lif->kern_pid, &rx_qcqs[i]);
			if (err)
				goto err_out;
		}
	}

	/* stop and clean the queues */
	ionic_stop_queues_reconfig(lif);

	if (qparam->nxqs != lif->nxqs) {
		err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
		if (err)
			goto err_out_reinit_unlock;
		err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
		if (err) {
			netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
			goto err_out_reinit_unlock;
		}
	}

	/* swap new desc_info and rings, keeping existing interrupt config */
	if (tx_qcqs) {
		lif->ntxq_descs = qparam->ntxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
	}

	if (rx_qcqs) {
		lif->nrxq_descs = qparam->nrxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
	}

	/* if we need to change the interrupt layout, this is the time */
	if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
	    qparam->nxqs != lif->nxqs) {
		if (qparam->intr_split) {
			set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
		} else {
			clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
			lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
			lif->tx_coalesce_hw = lif->rx_coalesce_hw;
		}

		/* clear existing interrupt assignments */
		for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
			ionic_qcq_intr_free(lif, lif->txqcqs[i]);
			ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
		}

		/* re-assign the interrupts */
		for (i = 0; i < qparam->nxqs; i++) {
			lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
			err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->rxqcqs[i]->intr.index,
					     lif->rx_coalesce_hw);

			if (qparam->intr_split) {
				lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
				err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
				ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
						     lif->txqcqs[i]->intr.index,
						     lif->tx_coalesce_hw);
			} else {
				lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
				ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
			}
		}
	}

	swap(lif->nxqs, qparam->nxqs);

err_out_reinit_unlock:
	/* re-init the queues, but don't lose an error code */
	if (err)
		ionic_start_queues_reconfig(lif);
	else
		err = ionic_start_queues_reconfig(lif);

err_out:
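	/* Success and failure both fall through to here: after a successful
	 * swap the temporary arrays hold the old rings, after a failure they
	 * hold the unused new ones; either way they are freed below.
	 */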
	/* free old allocs without cleaning intr */
	for (i = 0; i < qparam->nxqs; i++) {
		if (tx_qcqs && tx_qcqs[i]) {
			tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, tx_qcqs[i]);
			devm_kfree(lif->ionic->dev, tx_qcqs[i]);
			tx_qcqs[i] = NULL;
		}
		if (rx_qcqs && rx_qcqs[i]) {
			rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, rx_qcqs[i]);
			devm_kfree(lif->ionic->dev, rx_qcqs[i]);
			rx_qcqs[i] = NULL;
		}
	}

	/* free q array */
	if (rx_qcqs) {
		devm_kfree(lif->ionic->dev, rx_qcqs);
		rx_qcqs = NULL;
	}
	if (tx_qcqs) {
		devm_kfree(lif->ionic->dev, tx_qcqs);
		tx_qcqs = NULL;
	}

	/* clean the unused dma and info allocations when new set is smaller
	 * than the full array, but leave the qcq shells in place
	 */
	for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
		lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
		ionic_qcq_free(lif, lif->txqcqs[i]);

		lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
		ionic_qcq_free(lif, lif->rxqcqs[i]);
	}

	return err;
}

int ionic_lif_alloc(struct ionic *ionic)
{
	struct device *dev = ionic->dev;
	union ionic_lif_identity *lid;
	struct net_device *netdev;
	struct ionic_lif *lif;
	int tbl_sz;
	int err;

	lid = kzalloc(sizeof(*lid), GFP_KERNEL);
	if (!lid)
		return -ENOMEM;

	netdev = alloc_etherdev_mqs(sizeof(*lif),
				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
	if (!netdev) {
		dev_err(dev, "Cannot allocate netdev, aborting\n");
		err = -ENOMEM;
		goto err_out_free_lid;
	}

	SET_NETDEV_DEV(netdev, dev);

	lif = netdev_priv(netdev);
	lif->netdev = netdev;
	ionic->lif = lif;
	netdev->netdev_ops = &ionic_netdev_ops;
	ionic_ethtool_set_ops(netdev);

	netdev->watchdog_timeo = 2 * HZ;
	netif_carrier_off(netdev);

	lif->identity = lid;
	lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
	ionic_lif_identify(ionic, lif->lif_type, lif->identity);
	lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
				     le32_to_cpu(lif->identity->eth.min_frame_size));
	lif->netdev->max_mtu =
		le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;

	lif->neqs = ionic->neqs_per_lif;
	lif->nxqs = ionic->ntxqs_per_lif;

	lif->ionic = ionic;
	lif->index = 0;
	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
	lif->tx_budget = IONIC_TX_BUDGET_DEFAULT;

	/* Convert the default coalesce value to actual hw resolution */
	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
						    lif->rx_coalesce_usecs);
	lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
	lif->tx_coalesce_hw = lif->rx_coalesce_hw;

	snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);

	spin_lock_init(&lif->adminq_lock);

	spin_lock_init(&lif->deferred.lock);
	INIT_LIST_HEAD(&lif->deferred.list);
	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);

	/* allocate lif info */
	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
	lif->info = dma_alloc_coherent(dev, lif->info_sz,
				       &lif->info_pa, GFP_KERNEL);
	if (!lif->info) {
		dev_err(dev, "Failed to allocate lif info, aborting\n");
		err = -ENOMEM;
		goto err_out_free_netdev;
	}

	ionic_debugfs_add_lif(lif);
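
	/* The qtype info gathered by ionic_lif_queue_identify() below feeds
	 * the descriptor-size choices made while allocating the queues, so
	 * it must run before ionic_qcqs_alloc().
	 */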
	/* allocate control queues and txrx queue arrays */
	ionic_lif_queue_identify(lif);
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out_free_lif_info;

	/* allocate rss indirection table */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
					      &lif->rss_ind_tbl_pa,
					      GFP_KERNEL);

	if (!lif->rss_ind_tbl) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
		goto err_out_free_qcqs;
	}
	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);

	return 0;

err_out_free_qcqs:
	ionic_qcqs_free(lif);
err_out_free_lif_info:
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;
err_out_free_netdev:
	free_netdev(lif->netdev);
	lif = NULL;
err_out_free_lid:
	kfree(lid);

	return err;
}

static void ionic_lif_reset(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_reset(idev, lif->index);
	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
}

static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;

	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");

	netif_device_detach(lif->netdev);

	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
		mutex_lock(&lif->queue_lock);
		ionic_stop_queues(lif);
		mutex_unlock(&lif->queue_lock);
	}

	if (netif_running(lif->netdev)) {
		ionic_txrx_deinit(lif);
		ionic_txrx_free(lif);
	}
	ionic_lif_deinit(lif);
	ionic_reset(ionic);
	ionic_qcqs_free(lif);

	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}

static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	int err;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Up: restarting LIFs\n");

	ionic_init_devinfo(ionic);
	ionic_port_init(ionic);
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out;

	err = ionic_lif_init(lif);
	if (err)
		goto err_qcqs_free;

	if (lif->registered)
		ionic_lif_set_netdev_info(lif);

	ionic_rx_filter_replay(lif);

	if (netif_running(lif->netdev)) {
		err = ionic_txrx_alloc(lif);
		if (err)
			goto err_lifs_deinit;

		err = ionic_txrx_init(lif);
		if (err)
			goto err_txrx_free;
	}

	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
	ionic_link_status_check_request(lif);
	netif_device_attach(lif->netdev);
	dev_info(ionic->dev, "FW Up: LIFs restarted\n");

	return;

err_txrx_free:
	ionic_txrx_free(lif);
err_lifs_deinit:
	ionic_lif_deinit(lif);
err_qcqs_free:
	ionic_qcqs_free(lif);
err_out:
	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}

void ionic_lif_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
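
	/* Teardown runs in roughly the reverse order of ionic_lif_alloc():
	 * RSS table, queues, lif info, doorbell page, then the netdev itself.
	 */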
	/* free rss indirection table */
	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
			  lif->rss_ind_tbl_pa);
	lif->rss_ind_tbl = NULL;
	lif->rss_ind_tbl_pa = 0;

	/* free queues */
	ionic_qcqs_free(lif);
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		ionic_lif_reset(lif);

	/* free lif info */
	kfree(lif->identity);
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;

	/* unmap doorbell page */
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	/* free netdev & lif */
	ionic_debugfs_del_lif(lif);
	free_netdev(lif->netdev);
}

void ionic_lif_deinit(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
		return;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		cancel_work_sync(&lif->deferred.work);
		cancel_work_sync(&lif->tx_timeout_work);
		ionic_rx_filters_deinit(lif);
		if (lif->netdev->features & NETIF_F_RXHASH)
			ionic_lif_rss_deinit(lif);
	}

	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
	ionic_lif_qcq_deinit(lif, lif->adminqcq);

	mutex_destroy(&lif->queue_lock);
	ionic_lif_reset(lif);
}

static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_q_init_comp comp;
	struct ionic_dev *idev;
	struct ionic_qcq *qcq;
	struct ionic_queue *q;
	int err;

	idev = &lif->ionic->idev;
	qcq = lif->adminqcq;
	q = &qcq->q;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err) {
		netdev_err(lif->netdev, "adminq init failed %d\n", err);
		return err;
	}

	q->hw_type = comp.hw_type;
	q->hw_index = le32_to_cpu(comp.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
		       NAPI_POLL_WEIGHT);

	napi_enable(&qcq->napi);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static int ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_qcq *qcq = lif->notifyqcq;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	int err;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_ENA),
			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
		}
	};
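
	/* Note that .intr_index above points at the adminq's interrupt:
	 * the notifyq shares it, so notifyq events are serviced from the
	 * adminq napi context rather than from an interrupt of their own.
	 */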
dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 2622 dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 2623 2624 err = ionic_adminq_post_wait(lif, &ctx); 2625 if (err) 2626 return err; 2627 2628 lif->last_eid = 0; 2629 q->hw_type = ctx.comp.q_init.hw_type; 2630 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 2631 q->dbval = IONIC_DBELL_QID(q->hw_index); 2632 2633 dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type); 2634 dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index); 2635 2636 /* preset the callback info */ 2637 q->info[0].cb_arg = lif; 2638 2639 qcq->flags |= IONIC_QCQ_F_INITED; 2640 2641 return 0; 2642 } 2643 2644 static int ionic_station_set(struct ionic_lif *lif) 2645 { 2646 struct net_device *netdev = lif->netdev; 2647 struct ionic_admin_ctx ctx = { 2648 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 2649 .cmd.lif_getattr = { 2650 .opcode = IONIC_CMD_LIF_GETATTR, 2651 .index = cpu_to_le16(lif->index), 2652 .attr = IONIC_LIF_ATTR_MAC, 2653 }, 2654 }; 2655 struct sockaddr addr; 2656 int err; 2657 2658 err = ionic_adminq_post_wait(lif, &ctx); 2659 if (err) 2660 return err; 2661 netdev_dbg(lif->netdev, "found initial MAC addr %pM\n", 2662 ctx.comp.lif_getattr.mac); 2663 if (is_zero_ether_addr(ctx.comp.lif_getattr.mac)) 2664 return 0; 2665 2666 if (!is_zero_ether_addr(netdev->dev_addr)) { 2667 /* If the netdev mac is non-zero and doesn't match the default 2668 * device address, it was set by something earlier and we're 2669 * likely here again after a fw-upgrade reset. We need to be 2670 * sure the netdev mac is in our filter list. 2671 */ 2672 if (!ether_addr_equal(ctx.comp.lif_getattr.mac, 2673 netdev->dev_addr)) 2674 ionic_lif_addr(lif, netdev->dev_addr, true); 2675 } else { 2676 /* Update the netdev mac with the device's mac */ 2677 memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len); 2678 addr.sa_family = AF_INET; 2679 err = eth_prepare_mac_addr_change(netdev, &addr); 2680 if (err) { 2681 netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n", 2682 addr.sa_data, err); 2683 return 0; 2684 } 2685 2686 eth_commit_mac_addr_change(netdev, &addr); 2687 } 2688 2689 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", 2690 netdev->dev_addr); 2691 ionic_lif_addr(lif, netdev->dev_addr, true); 2692 2693 return 0; 2694 } 2695 2696 int ionic_lif_init(struct ionic_lif *lif) 2697 { 2698 struct ionic_dev *idev = &lif->ionic->idev; 2699 struct device *dev = lif->ionic->dev; 2700 struct ionic_lif_init_comp comp; 2701 int dbpage_num; 2702 int err; 2703 2704 mutex_lock(&lif->ionic->dev_cmd_lock); 2705 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa); 2706 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 2707 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 2708 mutex_unlock(&lif->ionic->dev_cmd_lock); 2709 if (err) 2710 return err; 2711 2712 lif->hw_index = le16_to_cpu(comp.hw_index); 2713 mutex_init(&lif->queue_lock); 2714 2715 /* now that we have the hw_index we can figure out our doorbell page */ 2716 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); 2717 if (!lif->dbid_count) { 2718 dev_err(dev, "No doorbell pages, aborting\n"); 2719 return -EINVAL; 2720 } 2721 2722 lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL); 2723 if (!lif->dbid_inuse) { 2724 dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n"); 2725 return -ENOMEM; 2726 } 2727 2728 /* first doorbell id reserved for kernel (dbid aka pid == zero) */ 2729 set_bit(0, lif->dbid_inuse); 2730 
int ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_lif_init_comp comp;
	int dbpage_num;
	int err;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err)
		return err;

	lif->hw_index = le16_to_cpu(comp.hw_index);
	mutex_init(&lif->queue_lock);

	/* now that we have the hw_index we can figure out our doorbell page */
	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
	if (!lif->dbid_count) {
		dev_err(dev, "No doorbell pages, aborting\n");
		return -EINVAL;
	}

	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
	if (!lif->dbid_inuse) {
		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
		return -ENOMEM;
	}

	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
	set_bit(0, lif->dbid_inuse);
	lif->kern_pid = 0;

	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
	if (!lif->kern_dbpage) {
		dev_err(dev, "Cannot map dbpage, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dbid;
	}

	err = ionic_lif_adminq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	if (lif->ionic->nnqs_per_lif) {
		err = ionic_lif_notifyq_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_init_nic_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		err = ionic_rx_filters_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_station_set(lif);
	if (err)
		goto err_out_notifyq_deinit;

	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;

	set_bit(IONIC_LIF_F_INITED, lif->state);

	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);

	return 0;

err_out_notifyq_deinit:
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
	ionic_lif_qcq_deinit(lif, lif->adminqcq);
	ionic_lif_reset(lif);
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
err_out_free_dbid:
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	return err;
}

static void ionic_lif_notify_work(struct work_struct *ws)
{
}

static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
		sizeof(ctx.cmd.lif_setattr.name));

	ionic_adminq_post_wait(lif, &ctx);
}

static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
{
	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
		return NULL;

	return netdev_priv(netdev);
}

static int ionic_lif_notify(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(info);
	struct ionic *ionic = container_of(nb, struct ionic, nb);
	struct ionic_lif *lif = ionic_netdev_lif(ndev);

	if (!lif || lif->ionic != ionic)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGENAME:
		ionic_lif_set_netdev_info(lif);
		break;
	}

	return NOTIFY_DONE;
}

int ionic_lif_register(struct ionic_lif *lif)
{
	int err;

	INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);

	lif->ionic->nb.notifier_call = ionic_lif_notify;

	err = register_netdevice_notifier(&lif->ionic->nb);
	if (err)
		lif->ionic->nb.notifier_call = NULL;

	/* only register LIF0 for now */
	err = register_netdev(lif->netdev);
	if (err) {
		dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
		return err;
	}
	lif->registered = true;
	ionic_lif_set_netdev_info(lif);

	return 0;
}
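
/* Drop the netdev notifier before unregistering the netdev so that
 * name-change events can no longer reach ionic_lif_set_netdev_info().
 */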
void ionic_lif_unregister(struct ionic_lif *lif)
{
	if (lif->ionic->nb.notifier_call) {
		unregister_netdevice_notifier(&lif->ionic->nb);
		cancel_work_sync(&lif->ionic->nb_work);
		lif->ionic->nb.notifier_call = NULL;
	}

	if (lif->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(lif->netdev);
	lif->registered = false;
}

static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	union ionic_q_identity *q_ident;
	struct ionic_dev *idev;
	int qtype;
	int err;

	idev = &lif->ionic->idev;
	q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;

	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* filter out the ones we know about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
					     ionic_qtype_versions[qtype]);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		if (!err) {
			qti->version = q_ident->version;
			qti->supported = q_ident->supported;
			qti->features = le64_to_cpu(q_ident->features);
			qti->desc_sz = le16_to_cpu(q_ident->desc_sz);
			qti->comp_sz = le16_to_cpu(q_ident->comp_sz);
			qti->sg_desc_sz = le16_to_cpu(q_ident->sg_desc_sz);
			qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
			qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
		}
		mutex_unlock(&ionic->dev_cmd_lock);

		if (err == -EINVAL) {
			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
			continue;
		} else if (err == -EIO) {
			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
			return;
		} else if (err) {
			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
				qtype, err);
			return;
		}

		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
			qtype, qti->version);
		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
			qtype, qti->supported);
		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
			qtype, qti->features);
		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
			qtype, qti->desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
			qtype, qti->comp_sz);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
			qtype, qti->sg_desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
			qtype, qti->max_sg_elems);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
			qtype, qti->sg_desc_stride);
	}
}
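
/* Read the LIF identity back from the dev_cmd register space: the
 * response data is copied out with memcpy_fromio() after the command
 * completes, then logged at debug level.
 */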
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
		       union ionic_lif_identity *lid)
{
	struct ionic_dev *idev = &ionic->idev;
	size_t sz;
	int err;

	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err)
		return err;

	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
		le64_to_cpu(lid->capabilities));

	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
		le32_to_cpu(lid->eth.max_ucast_filters));
	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
		le32_to_cpu(lid->eth.max_mcast_filters));
	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
		le64_to_cpu(lid->eth.config.features));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
		le32_to_cpu(lid->eth.config.mtu));

	return 0;
}

int ionic_lif_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 *    1 for master lif adminq/notifyq
	 *    1 for each CPU for master lif TxRx queue pairs
	 *    whatever's left is for RDMA queues
	 */
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2;	/* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	if (err == -ENOSPC)
		goto try_fewer;

	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

try_fewer:
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}