1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ 3 4 #include <linux/netdevice.h> 5 #include <linux/etherdevice.h> 6 #include <linux/rtnetlink.h> 7 #include <linux/interrupt.h> 8 #include <linux/pci.h> 9 #include <linux/cpumask.h> 10 11 #include "ionic.h" 12 #include "ionic_bus.h" 13 #include "ionic_lif.h" 14 #include "ionic_txrx.h" 15 #include "ionic_ethtool.h" 16 #include "ionic_debugfs.h" 17 18 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode); 19 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); 20 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); 21 static void ionic_link_status_check(struct ionic_lif *lif); 22 23 static void ionic_lif_deferred_work(struct work_struct *work) 24 { 25 struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work); 26 struct ionic_deferred *def = &lif->deferred; 27 struct ionic_deferred_work *w = NULL; 28 29 spin_lock_bh(&def->lock); 30 if (!list_empty(&def->list)) { 31 w = list_first_entry(&def->list, 32 struct ionic_deferred_work, list); 33 list_del(&w->list); 34 } 35 spin_unlock_bh(&def->lock); 36 37 if (w) { 38 switch (w->type) { 39 case IONIC_DW_TYPE_RX_MODE: 40 ionic_lif_rx_mode(lif, w->rx_mode); 41 break; 42 case IONIC_DW_TYPE_RX_ADDR_ADD: 43 ionic_lif_addr_add(lif, w->addr); 44 break; 45 case IONIC_DW_TYPE_RX_ADDR_DEL: 46 ionic_lif_addr_del(lif, w->addr); 47 break; 48 case IONIC_DW_TYPE_LINK_STATUS: 49 ionic_link_status_check(lif); 50 break; 51 default: 52 break; 53 } 54 kfree(w); 55 schedule_work(&def->work); 56 } 57 } 58 59 static void ionic_lif_deferred_enqueue(struct ionic_deferred *def, 60 struct ionic_deferred_work *work) 61 { 62 spin_lock_bh(&def->lock); 63 list_add_tail(&work->list, &def->list); 64 spin_unlock_bh(&def->lock); 65 schedule_work(&def->work); 66 } 67 68 static void ionic_link_status_check(struct ionic_lif *lif) 69 { 70 struct net_device *netdev = lif->netdev; 71 u16 link_status; 72 bool link_up; 73 74 link_status = le16_to_cpu(lif->info->status.link_status); 75 link_up = link_status == IONIC_PORT_OPER_STATUS_UP; 76 77 /* filter out the no-change cases */ 78 if (link_up == netif_carrier_ok(netdev)) 79 goto link_out; 80 81 if (link_up) { 82 netdev_info(netdev, "Link up - %d Gbps\n", 83 le32_to_cpu(lif->info->status.link_speed) / 1000); 84 85 if (test_bit(IONIC_LIF_UP, lif->state)) { 86 netif_tx_wake_all_queues(lif->netdev); 87 netif_carrier_on(netdev); 88 } 89 } else { 90 netdev_info(netdev, "Link down\n"); 91 92 /* carrier off first to avoid watchdog timeout */ 93 netif_carrier_off(netdev); 94 if (test_bit(IONIC_LIF_UP, lif->state)) 95 netif_tx_stop_all_queues(netdev); 96 } 97 98 link_out: 99 clear_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state); 100 } 101 102 static void ionic_link_status_check_request(struct ionic_lif *lif) 103 { 104 struct ionic_deferred_work *work; 105 106 /* we only need one request outstanding at a time */ 107 if (test_and_set_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state)) 108 return; 109 110 if (in_interrupt()) { 111 work = kzalloc(sizeof(*work), GFP_ATOMIC); 112 if (!work) 113 return; 114 115 work->type = IONIC_DW_TYPE_LINK_STATUS; 116 ionic_lif_deferred_enqueue(&lif->deferred, work); 117 } else { 118 ionic_link_status_check(lif); 119 } 120 } 121 122 static irqreturn_t ionic_isr(int irq, void *data) 123 { 124 struct napi_struct *napi = data; 125 126 napi_schedule_irqoff(napi); 127 128 return IRQ_HANDLED; 129 } 130 131 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq) 132 { 133 struct ionic_intr_info *intr = &qcq->intr; 134 struct device *dev = lif->ionic->dev; 135 struct ionic_queue *q = &qcq->q; 136 const char *name; 137 138 if (lif->registered) 139 name = lif->netdev->name; 140 else 141 name = dev_name(dev); 142 143 snprintf(intr->name, sizeof(intr->name), 144 "%s-%s-%s", IONIC_DRV_NAME, name, q->name); 145 146 return devm_request_irq(dev, intr->vector, ionic_isr, 147 0, intr->name, &qcq->napi); 148 } 149 150 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr) 151 { 152 struct ionic *ionic = lif->ionic; 153 int index; 154 155 index = find_first_zero_bit(ionic->intrs, ionic->nintrs); 156 if (index == ionic->nintrs) { 157 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n", 158 __func__, index, ionic->nintrs); 159 return -ENOSPC; 160 } 161 162 set_bit(index, ionic->intrs); 163 ionic_intr_init(&ionic->idev, intr, index); 164 165 return 0; 166 } 167 168 static void ionic_intr_free(struct ionic_lif *lif, int index) 169 { 170 if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs) 171 clear_bit(index, lif->ionic->intrs); 172 } 173 174 static int ionic_qcq_enable(struct ionic_qcq *qcq) 175 { 176 struct ionic_queue *q = &qcq->q; 177 struct ionic_lif *lif = q->lif; 178 struct ionic_dev *idev; 179 struct device *dev; 180 181 struct ionic_admin_ctx ctx = { 182 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 183 .cmd.q_control = { 184 .opcode = IONIC_CMD_Q_CONTROL, 185 .lif_index = cpu_to_le16(lif->index), 186 .type = q->type, 187 .index = cpu_to_le32(q->index), 188 .oper = IONIC_Q_ENABLE, 189 }, 190 }; 191 192 idev = &lif->ionic->idev; 193 dev = lif->ionic->dev; 194 195 dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n", 196 ctx.cmd.q_control.index, ctx.cmd.q_control.type); 197 198 if (qcq->flags & IONIC_QCQ_F_INTR) { 199 irq_set_affinity_hint(qcq->intr.vector, 200 &qcq->intr.affinity_mask); 201 napi_enable(&qcq->napi); 202 ionic_intr_clean(idev->intr_ctrl, qcq->intr.index); 203 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 204 IONIC_INTR_MASK_CLEAR); 205 } 206 207 return ionic_adminq_post_wait(lif, &ctx); 208 } 209 210 static int ionic_qcq_disable(struct ionic_qcq *qcq) 211 { 212 struct ionic_queue *q = &qcq->q; 213 struct ionic_lif *lif = q->lif; 214 struct ionic_dev *idev; 215 struct device *dev; 216 217 struct ionic_admin_ctx ctx = { 218 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 219 .cmd.q_control = { 220 .opcode = IONIC_CMD_Q_CONTROL, 221 .lif_index = cpu_to_le16(lif->index), 222 .type = q->type, 223 .index = cpu_to_le32(q->index), 224 .oper = IONIC_Q_DISABLE, 225 }, 226 }; 227 228 idev = &lif->ionic->idev; 229 dev = lif->ionic->dev; 230 231 dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n", 232 ctx.cmd.q_control.index, ctx.cmd.q_control.type); 233 234 if (qcq->flags & IONIC_QCQ_F_INTR) { 235 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 236 IONIC_INTR_MASK_SET); 237 synchronize_irq(qcq->intr.vector); 238 irq_set_affinity_hint(qcq->intr.vector, NULL); 239 napi_disable(&qcq->napi); 240 } 241 242 return ionic_adminq_post_wait(lif, &ctx); 243 } 244 245 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq) 246 { 247 struct ionic_dev *idev = &lif->ionic->idev; 248 struct device *dev = lif->ionic->dev; 249 250 if (!qcq) 251 return; 252 253 ionic_debugfs_del_qcq(qcq); 254 255 if (!(qcq->flags & IONIC_QCQ_F_INITED)) 256 return; 257 258 if (qcq->flags & IONIC_QCQ_F_INTR) { 259 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 260 IONIC_INTR_MASK_SET); 261 devm_free_irq(dev, qcq->intr.vector, &qcq->napi); 262 netif_napi_del(&qcq->napi); 263 } 264 265 qcq->flags &= ~IONIC_QCQ_F_INITED; 266 } 267 268 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) 269 { 270 struct device *dev = lif->ionic->dev; 271 272 if (!qcq) 273 return; 274 275 dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa); 276 qcq->base = NULL; 277 qcq->base_pa = 0; 278 279 if (qcq->flags & IONIC_QCQ_F_INTR) 280 ionic_intr_free(lif, qcq->intr.index); 281 282 devm_kfree(dev, qcq->cq.info); 283 qcq->cq.info = NULL; 284 devm_kfree(dev, qcq->q.info); 285 qcq->q.info = NULL; 286 devm_kfree(dev, qcq); 287 } 288 289 static void ionic_qcqs_free(struct ionic_lif *lif) 290 { 291 struct device *dev = lif->ionic->dev; 292 unsigned int i; 293 294 if (lif->notifyqcq) { 295 ionic_qcq_free(lif, lif->notifyqcq); 296 lif->notifyqcq = NULL; 297 } 298 299 if (lif->adminqcq) { 300 ionic_qcq_free(lif, lif->adminqcq); 301 lif->adminqcq = NULL; 302 } 303 304 for (i = 0; i < lif->nxqs; i++) 305 if (lif->rxqcqs[i].stats) 306 devm_kfree(dev, lif->rxqcqs[i].stats); 307 308 devm_kfree(dev, lif->rxqcqs); 309 lif->rxqcqs = NULL; 310 311 for (i = 0; i < lif->nxqs; i++) 312 if (lif->txqcqs[i].stats) 313 devm_kfree(dev, lif->txqcqs[i].stats); 314 315 devm_kfree(dev, lif->txqcqs); 316 lif->txqcqs = NULL; 317 } 318 319 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq, 320 struct ionic_qcq *n_qcq) 321 { 322 if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) { 323 ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index); 324 n_qcq->flags &= ~IONIC_QCQ_F_INTR; 325 } 326 327 n_qcq->intr.vector = src_qcq->intr.vector; 328 n_qcq->intr.index = src_qcq->intr.index; 329 } 330 331 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, 332 unsigned int index, 333 const char *name, unsigned int flags, 334 unsigned int num_descs, unsigned int desc_size, 335 unsigned int cq_desc_size, 336 unsigned int sg_desc_size, 337 unsigned int pid, struct ionic_qcq **qcq) 338 { 339 struct ionic_dev *idev = &lif->ionic->idev; 340 u32 q_size, cq_size, sg_size, total_size; 341 struct device *dev = lif->ionic->dev; 342 void *q_base, *cq_base, *sg_base; 343 dma_addr_t cq_base_pa = 0; 344 dma_addr_t sg_base_pa = 0; 345 dma_addr_t q_base_pa = 0; 346 struct ionic_qcq *new; 347 int err; 348 349 *qcq = NULL; 350 351 q_size = num_descs * desc_size; 352 cq_size = num_descs * cq_desc_size; 353 sg_size = num_descs * sg_desc_size; 354 355 total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE); 356 /* Note: aligning q_size/cq_size is not enough due to cq_base 357 * address aligning as q_base could be not aligned to the page. 358 * Adding PAGE_SIZE. 359 */ 360 total_size += PAGE_SIZE; 361 if (flags & IONIC_QCQ_F_SG) { 362 total_size += ALIGN(sg_size, PAGE_SIZE); 363 total_size += PAGE_SIZE; 364 } 365 366 new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL); 367 if (!new) { 368 netdev_err(lif->netdev, "Cannot allocate queue structure\n"); 369 err = -ENOMEM; 370 goto err_out; 371 } 372 373 new->flags = flags; 374 375 new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs, 376 GFP_KERNEL); 377 if (!new->q.info) { 378 netdev_err(lif->netdev, "Cannot allocate queue info\n"); 379 err = -ENOMEM; 380 goto err_out; 381 } 382 383 new->q.type = type; 384 385 err = ionic_q_init(lif, idev, &new->q, index, name, num_descs, 386 desc_size, sg_desc_size, pid); 387 if (err) { 388 netdev_err(lif->netdev, "Cannot initialize queue\n"); 389 goto err_out; 390 } 391 392 if (flags & IONIC_QCQ_F_INTR) { 393 err = ionic_intr_alloc(lif, &new->intr); 394 if (err) { 395 netdev_warn(lif->netdev, "no intr for %s: %d\n", 396 name, err); 397 goto err_out; 398 } 399 400 err = ionic_bus_get_irq(lif->ionic, new->intr.index); 401 if (err < 0) { 402 netdev_warn(lif->netdev, "no vector for %s: %d\n", 403 name, err); 404 goto err_out_free_intr; 405 } 406 new->intr.vector = err; 407 ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index, 408 IONIC_INTR_MASK_SET); 409 410 new->intr.cpu = new->intr.index % num_online_cpus(); 411 if (cpu_online(new->intr.cpu)) 412 cpumask_set_cpu(new->intr.cpu, 413 &new->intr.affinity_mask); 414 } else { 415 new->intr.index = INTR_INDEX_NOT_ASSIGNED; 416 } 417 418 new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs, 419 GFP_KERNEL); 420 if (!new->cq.info) { 421 netdev_err(lif->netdev, "Cannot allocate completion queue info\n"); 422 err = -ENOMEM; 423 goto err_out_free_intr; 424 } 425 426 err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size); 427 if (err) { 428 netdev_err(lif->netdev, "Cannot initialize completion queue\n"); 429 goto err_out_free_intr; 430 } 431 432 new->base = dma_alloc_coherent(dev, total_size, &new->base_pa, 433 GFP_KERNEL); 434 if (!new->base) { 435 netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n"); 436 err = -ENOMEM; 437 goto err_out_free_intr; 438 } 439 440 new->total_size = total_size; 441 442 q_base = new->base; 443 q_base_pa = new->base_pa; 444 445 cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE); 446 cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE); 447 448 if (flags & IONIC_QCQ_F_SG) { 449 sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size, 450 PAGE_SIZE); 451 sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE); 452 ionic_q_sg_map(&new->q, sg_base, sg_base_pa); 453 } 454 455 ionic_q_map(&new->q, q_base, q_base_pa); 456 ionic_cq_map(&new->cq, cq_base, cq_base_pa); 457 ionic_cq_bind(&new->cq, &new->q); 458 459 *qcq = new; 460 461 return 0; 462 463 err_out_free_intr: 464 ionic_intr_free(lif, new->intr.index); 465 err_out: 466 dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err); 467 return err; 468 } 469 470 static int ionic_qcqs_alloc(struct ionic_lif *lif) 471 { 472 struct device *dev = lif->ionic->dev; 473 unsigned int q_list_size; 474 unsigned int flags; 475 int err; 476 int i; 477 478 flags = IONIC_QCQ_F_INTR; 479 err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags, 480 IONIC_ADMINQ_LENGTH, 481 sizeof(struct ionic_admin_cmd), 482 sizeof(struct ionic_admin_comp), 483 0, lif->kern_pid, &lif->adminqcq); 484 if (err) 485 return err; 486 487 if (lif->ionic->nnqs_per_lif) { 488 flags = IONIC_QCQ_F_NOTIFYQ; 489 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq", 490 flags, IONIC_NOTIFYQ_LENGTH, 491 sizeof(struct ionic_notifyq_cmd), 492 sizeof(union ionic_notifyq_comp), 493 0, lif->kern_pid, &lif->notifyqcq); 494 if (err) 495 goto err_out_free_adminqcq; 496 497 /* Let the notifyq ride on the adminq interrupt */ 498 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq); 499 } 500 501 q_list_size = sizeof(*lif->txqcqs) * lif->nxqs; 502 err = -ENOMEM; 503 lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL); 504 if (!lif->txqcqs) 505 goto err_out_free_notifyqcq; 506 for (i = 0; i < lif->nxqs; i++) { 507 lif->txqcqs[i].stats = devm_kzalloc(dev, 508 sizeof(struct ionic_q_stats), 509 GFP_KERNEL); 510 if (!lif->txqcqs[i].stats) 511 goto err_out_free_tx_stats; 512 } 513 514 lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL); 515 if (!lif->rxqcqs) 516 goto err_out_free_tx_stats; 517 for (i = 0; i < lif->nxqs; i++) { 518 lif->rxqcqs[i].stats = devm_kzalloc(dev, 519 sizeof(struct ionic_q_stats), 520 GFP_KERNEL); 521 if (!lif->rxqcqs[i].stats) 522 goto err_out_free_rx_stats; 523 } 524 525 return 0; 526 527 err_out_free_rx_stats: 528 for (i = 0; i < lif->nxqs; i++) 529 if (lif->rxqcqs[i].stats) 530 devm_kfree(dev, lif->rxqcqs[i].stats); 531 devm_kfree(dev, lif->rxqcqs); 532 lif->rxqcqs = NULL; 533 err_out_free_tx_stats: 534 for (i = 0; i < lif->nxqs; i++) 535 if (lif->txqcqs[i].stats) 536 devm_kfree(dev, lif->txqcqs[i].stats); 537 devm_kfree(dev, lif->txqcqs); 538 lif->txqcqs = NULL; 539 err_out_free_notifyqcq: 540 if (lif->notifyqcq) { 541 ionic_qcq_free(lif, lif->notifyqcq); 542 lif->notifyqcq = NULL; 543 } 544 err_out_free_adminqcq: 545 ionic_qcq_free(lif, lif->adminqcq); 546 lif->adminqcq = NULL; 547 548 return err; 549 } 550 551 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) 552 { 553 struct device *dev = lif->ionic->dev; 554 struct ionic_queue *q = &qcq->q; 555 struct ionic_cq *cq = &qcq->cq; 556 struct ionic_admin_ctx ctx = { 557 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 558 .cmd.q_init = { 559 .opcode = IONIC_CMD_Q_INIT, 560 .lif_index = cpu_to_le16(lif->index), 561 .type = q->type, 562 .index = cpu_to_le32(q->index), 563 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 564 IONIC_QINIT_F_SG), 565 .intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index), 566 .pid = cpu_to_le16(q->pid), 567 .ring_size = ilog2(q->num_descs), 568 .ring_base = cpu_to_le64(q->base_pa), 569 .cq_ring_base = cpu_to_le64(cq->base_pa), 570 .sg_ring_base = cpu_to_le64(q->sg_base_pa), 571 }, 572 }; 573 int err; 574 575 dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid); 576 dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index); 577 dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 578 dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 579 580 err = ionic_adminq_post_wait(lif, &ctx); 581 if (err) 582 return err; 583 584 q->hw_type = ctx.comp.q_init.hw_type; 585 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 586 q->dbval = IONIC_DBELL_QID(q->hw_index); 587 588 dev_dbg(dev, "txq->hw_type %d\n", q->hw_type); 589 dev_dbg(dev, "txq->hw_index %d\n", q->hw_index); 590 591 qcq->flags |= IONIC_QCQ_F_INITED; 592 593 ionic_debugfs_add_qcq(lif, qcq); 594 595 return 0; 596 } 597 598 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) 599 { 600 struct device *dev = lif->ionic->dev; 601 struct ionic_queue *q = &qcq->q; 602 struct ionic_cq *cq = &qcq->cq; 603 struct ionic_admin_ctx ctx = { 604 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 605 .cmd.q_init = { 606 .opcode = IONIC_CMD_Q_INIT, 607 .lif_index = cpu_to_le16(lif->index), 608 .type = q->type, 609 .index = cpu_to_le32(q->index), 610 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ), 611 .intr_index = cpu_to_le16(cq->bound_intr->index), 612 .pid = cpu_to_le16(q->pid), 613 .ring_size = ilog2(q->num_descs), 614 .ring_base = cpu_to_le64(q->base_pa), 615 .cq_ring_base = cpu_to_le64(cq->base_pa), 616 }, 617 }; 618 int err; 619 620 dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid); 621 dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index); 622 dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 623 dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 624 625 err = ionic_adminq_post_wait(lif, &ctx); 626 if (err) 627 return err; 628 629 q->hw_type = ctx.comp.q_init.hw_type; 630 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 631 q->dbval = IONIC_DBELL_QID(q->hw_index); 632 633 dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type); 634 dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index); 635 636 netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi, 637 NAPI_POLL_WEIGHT); 638 639 err = ionic_request_irq(lif, qcq); 640 if (err) { 641 netif_napi_del(&qcq->napi); 642 return err; 643 } 644 645 qcq->flags |= IONIC_QCQ_F_INITED; 646 647 ionic_debugfs_add_qcq(lif, qcq); 648 649 return 0; 650 } 651 652 static bool ionic_notifyq_service(struct ionic_cq *cq, 653 struct ionic_cq_info *cq_info) 654 { 655 union ionic_notifyq_comp *comp = cq_info->cq_desc; 656 struct net_device *netdev; 657 struct ionic_queue *q; 658 struct ionic_lif *lif; 659 u64 eid; 660 661 q = cq->bound_q; 662 lif = q->info[0].cb_arg; 663 netdev = lif->netdev; 664 eid = le64_to_cpu(comp->event.eid); 665 666 /* Have we run out of new completions to process? */ 667 if (eid <= lif->last_eid) 668 return false; 669 670 lif->last_eid = eid; 671 672 dev_dbg(lif->ionic->dev, "notifyq event:\n"); 673 dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1, 674 comp, sizeof(*comp), true); 675 676 switch (le16_to_cpu(comp->event.ecode)) { 677 case IONIC_EVENT_LINK_CHANGE: 678 ionic_link_status_check_request(lif); 679 break; 680 case IONIC_EVENT_RESET: 681 netdev_info(netdev, "Notifyq IONIC_EVENT_RESET eid=%lld\n", 682 eid); 683 netdev_info(netdev, " reset_code=%d state=%d\n", 684 comp->reset.reset_code, 685 comp->reset.state); 686 break; 687 default: 688 netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n", 689 comp->event.ecode, eid); 690 break; 691 } 692 693 return true; 694 } 695 696 static int ionic_notifyq_clean(struct ionic_lif *lif, int budget) 697 { 698 struct ionic_dev *idev = &lif->ionic->idev; 699 struct ionic_cq *cq = &lif->notifyqcq->cq; 700 u32 work_done; 701 702 work_done = ionic_cq_service(cq, budget, ionic_notifyq_service, 703 NULL, NULL); 704 if (work_done) 705 ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, 706 work_done, IONIC_INTR_CRED_RESET_COALESCE); 707 708 return work_done; 709 } 710 711 static bool ionic_adminq_service(struct ionic_cq *cq, 712 struct ionic_cq_info *cq_info) 713 { 714 struct ionic_admin_comp *comp = cq_info->cq_desc; 715 716 if (!color_match(comp->color, cq->done_color)) 717 return false; 718 719 ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index)); 720 721 return true; 722 } 723 724 static int ionic_adminq_napi(struct napi_struct *napi, int budget) 725 { 726 struct ionic_lif *lif = napi_to_cq(napi)->lif; 727 int n_work = 0; 728 int a_work = 0; 729 730 if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)) 731 n_work = ionic_notifyq_clean(lif, budget); 732 a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL); 733 734 return max(n_work, a_work); 735 } 736 737 static void ionic_get_stats64(struct net_device *netdev, 738 struct rtnl_link_stats64 *ns) 739 { 740 struct ionic_lif *lif = netdev_priv(netdev); 741 struct ionic_lif_stats *ls; 742 743 memset(ns, 0, sizeof(*ns)); 744 ls = &lif->info->stats; 745 746 ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) + 747 le64_to_cpu(ls->rx_mcast_packets) + 748 le64_to_cpu(ls->rx_bcast_packets); 749 750 ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) + 751 le64_to_cpu(ls->tx_mcast_packets) + 752 le64_to_cpu(ls->tx_bcast_packets); 753 754 ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) + 755 le64_to_cpu(ls->rx_mcast_bytes) + 756 le64_to_cpu(ls->rx_bcast_bytes); 757 758 ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) + 759 le64_to_cpu(ls->tx_mcast_bytes) + 760 le64_to_cpu(ls->tx_bcast_bytes); 761 762 ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) + 763 le64_to_cpu(ls->rx_mcast_drop_packets) + 764 le64_to_cpu(ls->rx_bcast_drop_packets); 765 766 ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) + 767 le64_to_cpu(ls->tx_mcast_drop_packets) + 768 le64_to_cpu(ls->tx_bcast_drop_packets); 769 770 ns->multicast = le64_to_cpu(ls->rx_mcast_packets); 771 772 ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty); 773 774 ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) + 775 le64_to_cpu(ls->rx_queue_disabled) + 776 le64_to_cpu(ls->rx_desc_fetch_error) + 777 le64_to_cpu(ls->rx_desc_data_error); 778 779 ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) + 780 le64_to_cpu(ls->tx_queue_disabled) + 781 le64_to_cpu(ls->tx_desc_fetch_error) + 782 le64_to_cpu(ls->tx_desc_data_error); 783 784 ns->rx_errors = ns->rx_over_errors + 785 ns->rx_missed_errors; 786 787 ns->tx_errors = ns->tx_aborted_errors; 788 } 789 790 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) 791 { 792 struct ionic_admin_ctx ctx = { 793 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 794 .cmd.rx_filter_add = { 795 .opcode = IONIC_CMD_RX_FILTER_ADD, 796 .lif_index = cpu_to_le16(lif->index), 797 .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), 798 }, 799 }; 800 struct ionic_rx_filter *f; 801 int err; 802 803 /* don't bother if we already have it */ 804 spin_lock_bh(&lif->rx_filters.lock); 805 f = ionic_rx_filter_by_addr(lif, addr); 806 spin_unlock_bh(&lif->rx_filters.lock); 807 if (f) 808 return 0; 809 810 netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr, 811 ctx.comp.rx_filter_add.filter_id); 812 813 memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); 814 err = ionic_adminq_post_wait(lif, &ctx); 815 if (err) 816 return err; 817 818 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); 819 } 820 821 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) 822 { 823 struct ionic_admin_ctx ctx = { 824 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 825 .cmd.rx_filter_del = { 826 .opcode = IONIC_CMD_RX_FILTER_DEL, 827 .lif_index = cpu_to_le16(lif->index), 828 }, 829 }; 830 struct ionic_rx_filter *f; 831 int err; 832 833 spin_lock_bh(&lif->rx_filters.lock); 834 f = ionic_rx_filter_by_addr(lif, addr); 835 if (!f) { 836 spin_unlock_bh(&lif->rx_filters.lock); 837 return -ENOENT; 838 } 839 840 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); 841 ionic_rx_filter_free(lif, f); 842 spin_unlock_bh(&lif->rx_filters.lock); 843 844 err = ionic_adminq_post_wait(lif, &ctx); 845 if (err) 846 return err; 847 848 netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr, 849 ctx.cmd.rx_filter_del.filter_id); 850 851 return 0; 852 } 853 854 static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add) 855 { 856 struct ionic *ionic = lif->ionic; 857 struct ionic_deferred_work *work; 858 unsigned int nmfilters; 859 unsigned int nufilters; 860 861 if (add) { 862 /* Do we have space for this filter? We test the counters 863 * here before checking the need for deferral so that we 864 * can return an overflow error to the stack. 865 */ 866 nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters); 867 nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters); 868 869 if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)) 870 lif->nmcast++; 871 else if (!is_multicast_ether_addr(addr) && 872 lif->nucast < nufilters) 873 lif->nucast++; 874 else 875 return -ENOSPC; 876 } else { 877 if (is_multicast_ether_addr(addr) && lif->nmcast) 878 lif->nmcast--; 879 else if (!is_multicast_ether_addr(addr) && lif->nucast) 880 lif->nucast--; 881 } 882 883 if (in_interrupt()) { 884 work = kzalloc(sizeof(*work), GFP_ATOMIC); 885 if (!work) { 886 netdev_err(lif->netdev, "%s OOM\n", __func__); 887 return -ENOMEM; 888 } 889 work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD : 890 IONIC_DW_TYPE_RX_ADDR_DEL; 891 memcpy(work->addr, addr, ETH_ALEN); 892 netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n", 893 add ? "add" : "del", addr); 894 ionic_lif_deferred_enqueue(&lif->deferred, work); 895 } else { 896 netdev_dbg(lif->netdev, "rx_filter %s %pM\n", 897 add ? "add" : "del", addr); 898 if (add) 899 return ionic_lif_addr_add(lif, addr); 900 else 901 return ionic_lif_addr_del(lif, addr); 902 } 903 904 return 0; 905 } 906 907 static int ionic_addr_add(struct net_device *netdev, const u8 *addr) 908 { 909 return ionic_lif_addr(netdev_priv(netdev), addr, true); 910 } 911 912 static int ionic_addr_del(struct net_device *netdev, const u8 *addr) 913 { 914 return ionic_lif_addr(netdev_priv(netdev), addr, false); 915 } 916 917 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) 918 { 919 struct ionic_admin_ctx ctx = { 920 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 921 .cmd.rx_mode_set = { 922 .opcode = IONIC_CMD_RX_MODE_SET, 923 .lif_index = cpu_to_le16(lif->index), 924 .rx_mode = cpu_to_le16(rx_mode), 925 }, 926 }; 927 char buf[128]; 928 int err; 929 int i; 930 #define REMAIN(__x) (sizeof(buf) - (__x)) 931 932 i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", 933 lif->rx_mode, rx_mode); 934 if (rx_mode & IONIC_RX_MODE_F_UNICAST) 935 i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); 936 if (rx_mode & IONIC_RX_MODE_F_MULTICAST) 937 i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); 938 if (rx_mode & IONIC_RX_MODE_F_BROADCAST) 939 i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); 940 if (rx_mode & IONIC_RX_MODE_F_PROMISC) 941 i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); 942 if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) 943 i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); 944 netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf); 945 946 err = ionic_adminq_post_wait(lif, &ctx); 947 if (err) 948 netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n", 949 rx_mode, err); 950 else 951 lif->rx_mode = rx_mode; 952 } 953 954 static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) 955 { 956 struct ionic_deferred_work *work; 957 958 if (in_interrupt()) { 959 work = kzalloc(sizeof(*work), GFP_ATOMIC); 960 if (!work) { 961 netdev_err(lif->netdev, "%s OOM\n", __func__); 962 return; 963 } 964 work->type = IONIC_DW_TYPE_RX_MODE; 965 work->rx_mode = rx_mode; 966 netdev_dbg(lif->netdev, "deferred: rx_mode\n"); 967 ionic_lif_deferred_enqueue(&lif->deferred, work); 968 } else { 969 ionic_lif_rx_mode(lif, rx_mode); 970 } 971 } 972 973 static void ionic_set_rx_mode(struct net_device *netdev) 974 { 975 struct ionic_lif *lif = netdev_priv(netdev); 976 struct ionic_identity *ident; 977 unsigned int nfilters; 978 unsigned int rx_mode; 979 980 ident = &lif->ionic->ident; 981 982 rx_mode = IONIC_RX_MODE_F_UNICAST; 983 rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0; 984 rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0; 985 rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; 986 rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0; 987 988 /* sync unicast addresses 989 * next check to see if we're in an overflow state 990 * if so, we track that we overflowed and enable NIC PROMISC 991 * else if the overflow is set and not needed 992 * we remove our overflow flag and check the netdev flags 993 * to see if we can disable NIC PROMISC 994 */ 995 __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); 996 nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters); 997 if (netdev_uc_count(netdev) + 1 > nfilters) { 998 rx_mode |= IONIC_RX_MODE_F_PROMISC; 999 lif->uc_overflow = true; 1000 } else if (lif->uc_overflow) { 1001 lif->uc_overflow = false; 1002 if (!(netdev->flags & IFF_PROMISC)) 1003 rx_mode &= ~IONIC_RX_MODE_F_PROMISC; 1004 } 1005 1006 /* same for multicast */ 1007 __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); 1008 nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters); 1009 if (netdev_mc_count(netdev) > nfilters) { 1010 rx_mode |= IONIC_RX_MODE_F_ALLMULTI; 1011 lif->mc_overflow = true; 1012 } else if (lif->mc_overflow) { 1013 lif->mc_overflow = false; 1014 if (!(netdev->flags & IFF_ALLMULTI)) 1015 rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; 1016 } 1017 1018 if (lif->rx_mode != rx_mode) 1019 _ionic_lif_rx_mode(lif, rx_mode); 1020 } 1021 1022 static __le64 ionic_netdev_features_to_nic(netdev_features_t features) 1023 { 1024 u64 wanted = 0; 1025 1026 if (features & NETIF_F_HW_VLAN_CTAG_TX) 1027 wanted |= IONIC_ETH_HW_VLAN_TX_TAG; 1028 if (features & NETIF_F_HW_VLAN_CTAG_RX) 1029 wanted |= IONIC_ETH_HW_VLAN_RX_STRIP; 1030 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) 1031 wanted |= IONIC_ETH_HW_VLAN_RX_FILTER; 1032 if (features & NETIF_F_RXHASH) 1033 wanted |= IONIC_ETH_HW_RX_HASH; 1034 if (features & NETIF_F_RXCSUM) 1035 wanted |= IONIC_ETH_HW_RX_CSUM; 1036 if (features & NETIF_F_SG) 1037 wanted |= IONIC_ETH_HW_TX_SG; 1038 if (features & NETIF_F_HW_CSUM) 1039 wanted |= IONIC_ETH_HW_TX_CSUM; 1040 if (features & NETIF_F_TSO) 1041 wanted |= IONIC_ETH_HW_TSO; 1042 if (features & NETIF_F_TSO6) 1043 wanted |= IONIC_ETH_HW_TSO_IPV6; 1044 if (features & NETIF_F_TSO_ECN) 1045 wanted |= IONIC_ETH_HW_TSO_ECN; 1046 if (features & NETIF_F_GSO_GRE) 1047 wanted |= IONIC_ETH_HW_TSO_GRE; 1048 if (features & NETIF_F_GSO_GRE_CSUM) 1049 wanted |= IONIC_ETH_HW_TSO_GRE_CSUM; 1050 if (features & NETIF_F_GSO_IPXIP4) 1051 wanted |= IONIC_ETH_HW_TSO_IPXIP4; 1052 if (features & NETIF_F_GSO_IPXIP6) 1053 wanted |= IONIC_ETH_HW_TSO_IPXIP6; 1054 if (features & NETIF_F_GSO_UDP_TUNNEL) 1055 wanted |= IONIC_ETH_HW_TSO_UDP; 1056 if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) 1057 wanted |= IONIC_ETH_HW_TSO_UDP_CSUM; 1058 1059 return cpu_to_le64(wanted); 1060 } 1061 1062 static int ionic_set_nic_features(struct ionic_lif *lif, 1063 netdev_features_t features) 1064 { 1065 struct device *dev = lif->ionic->dev; 1066 struct ionic_admin_ctx ctx = { 1067 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1068 .cmd.lif_setattr = { 1069 .opcode = IONIC_CMD_LIF_SETATTR, 1070 .index = cpu_to_le16(lif->index), 1071 .attr = IONIC_LIF_ATTR_FEATURES, 1072 }, 1073 }; 1074 u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG | 1075 IONIC_ETH_HW_VLAN_RX_STRIP | 1076 IONIC_ETH_HW_VLAN_RX_FILTER; 1077 int err; 1078 1079 ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features); 1080 err = ionic_adminq_post_wait(lif, &ctx); 1081 if (err) 1082 return err; 1083 1084 lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features & 1085 ctx.comp.lif_setattr.features); 1086 1087 if ((vlan_flags & features) && 1088 !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features))) 1089 dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n"); 1090 1091 if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG) 1092 dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n"); 1093 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) 1094 dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n"); 1095 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) 1096 dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n"); 1097 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) 1098 dev_dbg(dev, "feature ETH_HW_RX_HASH\n"); 1099 if (lif->hw_features & IONIC_ETH_HW_TX_SG) 1100 dev_dbg(dev, "feature ETH_HW_TX_SG\n"); 1101 if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) 1102 dev_dbg(dev, "feature ETH_HW_TX_CSUM\n"); 1103 if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) 1104 dev_dbg(dev, "feature ETH_HW_RX_CSUM\n"); 1105 if (lif->hw_features & IONIC_ETH_HW_TSO) 1106 dev_dbg(dev, "feature ETH_HW_TSO\n"); 1107 if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) 1108 dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n"); 1109 if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) 1110 dev_dbg(dev, "feature ETH_HW_TSO_ECN\n"); 1111 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) 1112 dev_dbg(dev, "feature ETH_HW_TSO_GRE\n"); 1113 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) 1114 dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n"); 1115 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) 1116 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n"); 1117 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) 1118 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n"); 1119 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) 1120 dev_dbg(dev, "feature ETH_HW_TSO_UDP\n"); 1121 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) 1122 dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n"); 1123 1124 return 0; 1125 } 1126 1127 static int ionic_init_nic_features(struct ionic_lif *lif) 1128 { 1129 struct net_device *netdev = lif->netdev; 1130 netdev_features_t features; 1131 int err; 1132 1133 /* set up what we expect to support by default */ 1134 features = NETIF_F_HW_VLAN_CTAG_TX | 1135 NETIF_F_HW_VLAN_CTAG_RX | 1136 NETIF_F_HW_VLAN_CTAG_FILTER | 1137 NETIF_F_RXHASH | 1138 NETIF_F_SG | 1139 NETIF_F_HW_CSUM | 1140 NETIF_F_RXCSUM | 1141 NETIF_F_TSO | 1142 NETIF_F_TSO6 | 1143 NETIF_F_TSO_ECN; 1144 1145 err = ionic_set_nic_features(lif, features); 1146 if (err) 1147 return err; 1148 1149 /* tell the netdev what we actually can support */ 1150 netdev->features |= NETIF_F_HIGHDMA; 1151 1152 if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG) 1153 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; 1154 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) 1155 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; 1156 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) 1157 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1158 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) 1159 netdev->hw_features |= NETIF_F_RXHASH; 1160 if (lif->hw_features & IONIC_ETH_HW_TX_SG) 1161 netdev->hw_features |= NETIF_F_SG; 1162 1163 if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) 1164 netdev->hw_enc_features |= NETIF_F_HW_CSUM; 1165 if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) 1166 netdev->hw_enc_features |= NETIF_F_RXCSUM; 1167 if (lif->hw_features & IONIC_ETH_HW_TSO) 1168 netdev->hw_enc_features |= NETIF_F_TSO; 1169 if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) 1170 netdev->hw_enc_features |= NETIF_F_TSO6; 1171 if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) 1172 netdev->hw_enc_features |= NETIF_F_TSO_ECN; 1173 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) 1174 netdev->hw_enc_features |= NETIF_F_GSO_GRE; 1175 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) 1176 netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM; 1177 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) 1178 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4; 1179 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) 1180 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6; 1181 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) 1182 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; 1183 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) 1184 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 1185 1186 netdev->hw_features |= netdev->hw_enc_features; 1187 netdev->features |= netdev->hw_features; 1188 1189 netdev->priv_flags |= IFF_UNICAST_FLT; 1190 1191 return 0; 1192 } 1193 1194 static int ionic_set_features(struct net_device *netdev, 1195 netdev_features_t features) 1196 { 1197 struct ionic_lif *lif = netdev_priv(netdev); 1198 int err; 1199 1200 netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n", 1201 __func__, (u64)lif->netdev->features, (u64)features); 1202 1203 err = ionic_set_nic_features(lif, features); 1204 1205 return err; 1206 } 1207 1208 static int ionic_set_mac_address(struct net_device *netdev, void *sa) 1209 { 1210 struct sockaddr *addr = sa; 1211 u8 *mac; 1212 int err; 1213 1214 mac = (u8 *)addr->sa_data; 1215 if (ether_addr_equal(netdev->dev_addr, mac)) 1216 return 0; 1217 1218 err = eth_prepare_mac_addr_change(netdev, addr); 1219 if (err) 1220 return err; 1221 1222 if (!is_zero_ether_addr(netdev->dev_addr)) { 1223 netdev_info(netdev, "deleting mac addr %pM\n", 1224 netdev->dev_addr); 1225 ionic_addr_del(netdev, netdev->dev_addr); 1226 } 1227 1228 eth_commit_mac_addr_change(netdev, addr); 1229 netdev_info(netdev, "updating mac addr %pM\n", mac); 1230 1231 return ionic_addr_add(netdev, mac); 1232 } 1233 1234 static int ionic_change_mtu(struct net_device *netdev, int new_mtu) 1235 { 1236 struct ionic_lif *lif = netdev_priv(netdev); 1237 struct ionic_admin_ctx ctx = { 1238 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1239 .cmd.lif_setattr = { 1240 .opcode = IONIC_CMD_LIF_SETATTR, 1241 .index = cpu_to_le16(lif->index), 1242 .attr = IONIC_LIF_ATTR_MTU, 1243 .mtu = cpu_to_le32(new_mtu), 1244 }, 1245 }; 1246 int err; 1247 1248 err = ionic_adminq_post_wait(lif, &ctx); 1249 if (err) 1250 return err; 1251 1252 netdev->mtu = new_mtu; 1253 err = ionic_reset_queues(lif); 1254 1255 return err; 1256 } 1257 1258 static void ionic_tx_timeout_work(struct work_struct *ws) 1259 { 1260 struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work); 1261 1262 netdev_info(lif->netdev, "Tx Timeout recovery\n"); 1263 1264 rtnl_lock(); 1265 ionic_reset_queues(lif); 1266 rtnl_unlock(); 1267 } 1268 1269 static void ionic_tx_timeout(struct net_device *netdev) 1270 { 1271 struct ionic_lif *lif = netdev_priv(netdev); 1272 1273 schedule_work(&lif->tx_timeout_work); 1274 } 1275 1276 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, 1277 u16 vid) 1278 { 1279 struct ionic_lif *lif = netdev_priv(netdev); 1280 struct ionic_admin_ctx ctx = { 1281 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1282 .cmd.rx_filter_add = { 1283 .opcode = IONIC_CMD_RX_FILTER_ADD, 1284 .lif_index = cpu_to_le16(lif->index), 1285 .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN), 1286 .vlan.vlan = cpu_to_le16(vid), 1287 }, 1288 }; 1289 int err; 1290 1291 err = ionic_adminq_post_wait(lif, &ctx); 1292 if (err) 1293 return err; 1294 1295 netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid, 1296 ctx.comp.rx_filter_add.filter_id); 1297 1298 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); 1299 } 1300 1301 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, 1302 u16 vid) 1303 { 1304 struct ionic_lif *lif = netdev_priv(netdev); 1305 struct ionic_admin_ctx ctx = { 1306 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1307 .cmd.rx_filter_del = { 1308 .opcode = IONIC_CMD_RX_FILTER_DEL, 1309 .lif_index = cpu_to_le16(lif->index), 1310 }, 1311 }; 1312 struct ionic_rx_filter *f; 1313 1314 spin_lock_bh(&lif->rx_filters.lock); 1315 1316 f = ionic_rx_filter_by_vlan(lif, vid); 1317 if (!f) { 1318 spin_unlock_bh(&lif->rx_filters.lock); 1319 return -ENOENT; 1320 } 1321 1322 netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid, 1323 le32_to_cpu(ctx.cmd.rx_filter_del.filter_id)); 1324 1325 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); 1326 ionic_rx_filter_free(lif, f); 1327 spin_unlock_bh(&lif->rx_filters.lock); 1328 1329 return ionic_adminq_post_wait(lif, &ctx); 1330 } 1331 1332 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types, 1333 const u8 *key, const u32 *indir) 1334 { 1335 struct ionic_admin_ctx ctx = { 1336 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1337 .cmd.lif_setattr = { 1338 .opcode = IONIC_CMD_LIF_SETATTR, 1339 .attr = IONIC_LIF_ATTR_RSS, 1340 .rss.types = cpu_to_le16(types), 1341 .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa), 1342 }, 1343 }; 1344 unsigned int i, tbl_sz; 1345 1346 lif->rss_types = types; 1347 1348 if (key) 1349 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE); 1350 1351 if (indir) { 1352 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1353 for (i = 0; i < tbl_sz; i++) 1354 lif->rss_ind_tbl[i] = indir[i]; 1355 } 1356 1357 memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key, 1358 IONIC_RSS_HASH_KEY_SIZE); 1359 1360 return ionic_adminq_post_wait(lif, &ctx); 1361 } 1362 1363 static int ionic_lif_rss_init(struct ionic_lif *lif) 1364 { 1365 u8 rss_key[IONIC_RSS_HASH_KEY_SIZE]; 1366 unsigned int tbl_sz; 1367 unsigned int i; 1368 1369 netdev_rss_key_fill(rss_key, IONIC_RSS_HASH_KEY_SIZE); 1370 1371 lif->rss_types = IONIC_RSS_TYPE_IPV4 | 1372 IONIC_RSS_TYPE_IPV4_TCP | 1373 IONIC_RSS_TYPE_IPV4_UDP | 1374 IONIC_RSS_TYPE_IPV6 | 1375 IONIC_RSS_TYPE_IPV6_TCP | 1376 IONIC_RSS_TYPE_IPV6_UDP; 1377 1378 /* Fill indirection table with 'default' values */ 1379 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1380 for (i = 0; i < tbl_sz; i++) 1381 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs); 1382 1383 return ionic_lif_rss_config(lif, lif->rss_types, rss_key, NULL); 1384 } 1385 1386 static int ionic_lif_rss_deinit(struct ionic_lif *lif) 1387 { 1388 return ionic_lif_rss_config(lif, 0x0, NULL, NULL); 1389 } 1390 1391 static void ionic_txrx_disable(struct ionic_lif *lif) 1392 { 1393 unsigned int i; 1394 1395 for (i = 0; i < lif->nxqs; i++) { 1396 ionic_qcq_disable(lif->txqcqs[i].qcq); 1397 ionic_qcq_disable(lif->rxqcqs[i].qcq); 1398 } 1399 } 1400 1401 static void ionic_txrx_deinit(struct ionic_lif *lif) 1402 { 1403 unsigned int i; 1404 1405 for (i = 0; i < lif->nxqs; i++) { 1406 ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq); 1407 ionic_tx_flush(&lif->txqcqs[i].qcq->cq); 1408 1409 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq); 1410 ionic_rx_flush(&lif->rxqcqs[i].qcq->cq); 1411 ionic_rx_empty(&lif->rxqcqs[i].qcq->q); 1412 } 1413 } 1414 1415 static void ionic_txrx_free(struct ionic_lif *lif) 1416 { 1417 unsigned int i; 1418 1419 for (i = 0; i < lif->nxqs; i++) { 1420 ionic_qcq_free(lif, lif->txqcqs[i].qcq); 1421 lif->txqcqs[i].qcq = NULL; 1422 1423 ionic_qcq_free(lif, lif->rxqcqs[i].qcq); 1424 lif->rxqcqs[i].qcq = NULL; 1425 } 1426 } 1427 1428 static int ionic_txrx_alloc(struct ionic_lif *lif) 1429 { 1430 unsigned int flags; 1431 unsigned int i; 1432 int err = 0; 1433 u32 coal; 1434 1435 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; 1436 for (i = 0; i < lif->nxqs; i++) { 1437 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 1438 lif->ntxq_descs, 1439 sizeof(struct ionic_txq_desc), 1440 sizeof(struct ionic_txq_comp), 1441 sizeof(struct ionic_txq_sg_desc), 1442 lif->kern_pid, &lif->txqcqs[i].qcq); 1443 if (err) 1444 goto err_out; 1445 1446 lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats; 1447 } 1448 1449 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_INTR; 1450 coal = ionic_coal_usec_to_hw(lif->ionic, lif->rx_coalesce_usecs); 1451 for (i = 0; i < lif->nxqs; i++) { 1452 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 1453 lif->nrxq_descs, 1454 sizeof(struct ionic_rxq_desc), 1455 sizeof(struct ionic_rxq_comp), 1456 0, lif->kern_pid, &lif->rxqcqs[i].qcq); 1457 if (err) 1458 goto err_out; 1459 1460 lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats; 1461 1462 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 1463 lif->rxqcqs[i].qcq->intr.index, coal); 1464 ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq, 1465 lif->txqcqs[i].qcq); 1466 } 1467 1468 return 0; 1469 1470 err_out: 1471 ionic_txrx_free(lif); 1472 1473 return err; 1474 } 1475 1476 static int ionic_txrx_init(struct ionic_lif *lif) 1477 { 1478 unsigned int i; 1479 int err; 1480 1481 for (i = 0; i < lif->nxqs; i++) { 1482 err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq); 1483 if (err) 1484 goto err_out; 1485 1486 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq); 1487 if (err) { 1488 ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq); 1489 goto err_out; 1490 } 1491 } 1492 1493 if (lif->netdev->features & NETIF_F_RXHASH) 1494 ionic_lif_rss_init(lif); 1495 1496 ionic_set_rx_mode(lif->netdev); 1497 1498 return 0; 1499 1500 err_out: 1501 while (i--) { 1502 ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq); 1503 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq); 1504 } 1505 1506 return err; 1507 } 1508 1509 static int ionic_txrx_enable(struct ionic_lif *lif) 1510 { 1511 int i, err; 1512 1513 for (i = 0; i < lif->nxqs; i++) { 1514 err = ionic_qcq_enable(lif->txqcqs[i].qcq); 1515 if (err) 1516 goto err_out; 1517 1518 ionic_rx_fill(&lif->rxqcqs[i].qcq->q); 1519 err = ionic_qcq_enable(lif->rxqcqs[i].qcq); 1520 if (err) { 1521 ionic_qcq_disable(lif->txqcqs[i].qcq); 1522 goto err_out; 1523 } 1524 } 1525 1526 return 0; 1527 1528 err_out: 1529 while (i--) { 1530 ionic_qcq_disable(lif->rxqcqs[i].qcq); 1531 ionic_qcq_disable(lif->txqcqs[i].qcq); 1532 } 1533 1534 return err; 1535 } 1536 1537 int ionic_open(struct net_device *netdev) 1538 { 1539 struct ionic_lif *lif = netdev_priv(netdev); 1540 int err; 1541 1542 netif_carrier_off(netdev); 1543 1544 err = ionic_txrx_alloc(lif); 1545 if (err) 1546 return err; 1547 1548 err = ionic_txrx_init(lif); 1549 if (err) 1550 goto err_txrx_free; 1551 1552 err = ionic_txrx_enable(lif); 1553 if (err) 1554 goto err_txrx_deinit; 1555 1556 netif_set_real_num_tx_queues(netdev, lif->nxqs); 1557 netif_set_real_num_rx_queues(netdev, lif->nxqs); 1558 1559 set_bit(IONIC_LIF_UP, lif->state); 1560 1561 ionic_link_status_check_request(lif); 1562 if (netif_carrier_ok(netdev)) 1563 netif_tx_wake_all_queues(netdev); 1564 1565 return 0; 1566 1567 err_txrx_deinit: 1568 ionic_txrx_deinit(lif); 1569 err_txrx_free: 1570 ionic_txrx_free(lif); 1571 return err; 1572 } 1573 1574 int ionic_stop(struct net_device *netdev) 1575 { 1576 struct ionic_lif *lif = netdev_priv(netdev); 1577 int err = 0; 1578 1579 if (!test_bit(IONIC_LIF_UP, lif->state)) { 1580 dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n", 1581 __func__, lif->name); 1582 return 0; 1583 } 1584 dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name); 1585 clear_bit(IONIC_LIF_UP, lif->state); 1586 1587 /* carrier off before disabling queues to avoid watchdog timeout */ 1588 netif_carrier_off(netdev); 1589 netif_tx_stop_all_queues(netdev); 1590 netif_tx_disable(netdev); 1591 1592 ionic_txrx_disable(lif); 1593 ionic_txrx_deinit(lif); 1594 ionic_txrx_free(lif); 1595 1596 return err; 1597 } 1598 1599 static const struct net_device_ops ionic_netdev_ops = { 1600 .ndo_open = ionic_open, 1601 .ndo_stop = ionic_stop, 1602 .ndo_start_xmit = ionic_start_xmit, 1603 .ndo_get_stats64 = ionic_get_stats64, 1604 .ndo_set_rx_mode = ionic_set_rx_mode, 1605 .ndo_set_features = ionic_set_features, 1606 .ndo_set_mac_address = ionic_set_mac_address, 1607 .ndo_validate_addr = eth_validate_addr, 1608 .ndo_tx_timeout = ionic_tx_timeout, 1609 .ndo_change_mtu = ionic_change_mtu, 1610 .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid, 1611 .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid, 1612 }; 1613 1614 int ionic_reset_queues(struct ionic_lif *lif) 1615 { 1616 bool running; 1617 int err = 0; 1618 1619 /* Put off the next watchdog timeout */ 1620 netif_trans_update(lif->netdev); 1621 1622 if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET)) 1623 return -EBUSY; 1624 1625 running = netif_running(lif->netdev); 1626 if (running) 1627 err = ionic_stop(lif->netdev); 1628 if (!err && running) 1629 ionic_open(lif->netdev); 1630 1631 clear_bit(IONIC_LIF_QUEUE_RESET, lif->state); 1632 1633 return err; 1634 } 1635 1636 static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index) 1637 { 1638 struct device *dev = ionic->dev; 1639 struct net_device *netdev; 1640 struct ionic_lif *lif; 1641 int tbl_sz; 1642 u32 coal; 1643 int err; 1644 1645 netdev = alloc_etherdev_mqs(sizeof(*lif), 1646 ionic->ntxqs_per_lif, ionic->ntxqs_per_lif); 1647 if (!netdev) { 1648 dev_err(dev, "Cannot allocate netdev, aborting\n"); 1649 return ERR_PTR(-ENOMEM); 1650 } 1651 1652 SET_NETDEV_DEV(netdev, dev); 1653 1654 lif = netdev_priv(netdev); 1655 lif->netdev = netdev; 1656 ionic->master_lif = lif; 1657 netdev->netdev_ops = &ionic_netdev_ops; 1658 ionic_ethtool_set_ops(netdev); 1659 1660 netdev->watchdog_timeo = 2 * HZ; 1661 netdev->min_mtu = IONIC_MIN_MTU; 1662 netdev->max_mtu = IONIC_MAX_MTU; 1663 1664 lif->neqs = ionic->neqs_per_lif; 1665 lif->nxqs = ionic->ntxqs_per_lif; 1666 1667 lif->ionic = ionic; 1668 lif->index = index; 1669 lif->ntxq_descs = IONIC_DEF_TXRX_DESC; 1670 lif->nrxq_descs = IONIC_DEF_TXRX_DESC; 1671 1672 /* Convert the default coalesce value to actual hw resolution */ 1673 coal = ionic_coal_usec_to_hw(lif->ionic, IONIC_ITR_COAL_USEC_DEFAULT); 1674 lif->rx_coalesce_usecs = ionic_coal_hw_to_usec(lif->ionic, coal); 1675 1676 snprintf(lif->name, sizeof(lif->name), "lif%u", index); 1677 1678 spin_lock_init(&lif->adminq_lock); 1679 1680 spin_lock_init(&lif->deferred.lock); 1681 INIT_LIST_HEAD(&lif->deferred.list); 1682 INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work); 1683 1684 /* allocate lif info */ 1685 lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE); 1686 lif->info = dma_alloc_coherent(dev, lif->info_sz, 1687 &lif->info_pa, GFP_KERNEL); 1688 if (!lif->info) { 1689 dev_err(dev, "Failed to allocate lif info, aborting\n"); 1690 err = -ENOMEM; 1691 goto err_out_free_netdev; 1692 } 1693 1694 /* allocate queues */ 1695 err = ionic_qcqs_alloc(lif); 1696 if (err) 1697 goto err_out_free_lif_info; 1698 1699 /* allocate rss indirection table */ 1700 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1701 lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz; 1702 lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz, 1703 &lif->rss_ind_tbl_pa, 1704 GFP_KERNEL); 1705 1706 if (!lif->rss_ind_tbl) { 1707 dev_err(dev, "Failed to allocate rss indirection table, aborting\n"); 1708 goto err_out_free_qcqs; 1709 } 1710 1711 list_add_tail(&lif->list, &ionic->lifs); 1712 1713 return lif; 1714 1715 err_out_free_qcqs: 1716 ionic_qcqs_free(lif); 1717 err_out_free_lif_info: 1718 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 1719 lif->info = NULL; 1720 lif->info_pa = 0; 1721 err_out_free_netdev: 1722 free_netdev(lif->netdev); 1723 lif = NULL; 1724 1725 return ERR_PTR(err); 1726 } 1727 1728 int ionic_lifs_alloc(struct ionic *ionic) 1729 { 1730 struct ionic_lif *lif; 1731 1732 INIT_LIST_HEAD(&ionic->lifs); 1733 1734 /* only build the first lif, others are for later features */ 1735 set_bit(0, ionic->lifbits); 1736 lif = ionic_lif_alloc(ionic, 0); 1737 1738 return PTR_ERR_OR_ZERO(lif); 1739 } 1740 1741 static void ionic_lif_reset(struct ionic_lif *lif) 1742 { 1743 struct ionic_dev *idev = &lif->ionic->idev; 1744 1745 mutex_lock(&lif->ionic->dev_cmd_lock); 1746 ionic_dev_cmd_lif_reset(idev, lif->index); 1747 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 1748 mutex_unlock(&lif->ionic->dev_cmd_lock); 1749 } 1750 1751 static void ionic_lif_free(struct ionic_lif *lif) 1752 { 1753 struct device *dev = lif->ionic->dev; 1754 1755 /* free rss indirection table */ 1756 dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl, 1757 lif->rss_ind_tbl_pa); 1758 lif->rss_ind_tbl = NULL; 1759 lif->rss_ind_tbl_pa = 0; 1760 1761 /* free queues */ 1762 ionic_qcqs_free(lif); 1763 ionic_lif_reset(lif); 1764 1765 /* free lif info */ 1766 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 1767 lif->info = NULL; 1768 lif->info_pa = 0; 1769 1770 /* unmap doorbell page */ 1771 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 1772 lif->kern_dbpage = NULL; 1773 kfree(lif->dbid_inuse); 1774 lif->dbid_inuse = NULL; 1775 1776 /* free netdev & lif */ 1777 ionic_debugfs_del_lif(lif); 1778 list_del(&lif->list); 1779 free_netdev(lif->netdev); 1780 } 1781 1782 void ionic_lifs_free(struct ionic *ionic) 1783 { 1784 struct list_head *cur, *tmp; 1785 struct ionic_lif *lif; 1786 1787 list_for_each_safe(cur, tmp, &ionic->lifs) { 1788 lif = list_entry(cur, struct ionic_lif, list); 1789 1790 ionic_lif_free(lif); 1791 } 1792 } 1793 1794 static void ionic_lif_deinit(struct ionic_lif *lif) 1795 { 1796 if (!test_bit(IONIC_LIF_INITED, lif->state)) 1797 return; 1798 1799 clear_bit(IONIC_LIF_INITED, lif->state); 1800 1801 ionic_rx_filters_deinit(lif); 1802 ionic_lif_rss_deinit(lif); 1803 1804 napi_disable(&lif->adminqcq->napi); 1805 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 1806 ionic_lif_qcq_deinit(lif, lif->adminqcq); 1807 1808 ionic_lif_reset(lif); 1809 } 1810 1811 void ionic_lifs_deinit(struct ionic *ionic) 1812 { 1813 struct list_head *cur, *tmp; 1814 struct ionic_lif *lif; 1815 1816 list_for_each_safe(cur, tmp, &ionic->lifs) { 1817 lif = list_entry(cur, struct ionic_lif, list); 1818 ionic_lif_deinit(lif); 1819 } 1820 } 1821 1822 static int ionic_lif_adminq_init(struct ionic_lif *lif) 1823 { 1824 struct device *dev = lif->ionic->dev; 1825 struct ionic_q_init_comp comp; 1826 struct ionic_dev *idev; 1827 struct ionic_qcq *qcq; 1828 struct ionic_queue *q; 1829 int err; 1830 1831 idev = &lif->ionic->idev; 1832 qcq = lif->adminqcq; 1833 q = &qcq->q; 1834 1835 mutex_lock(&lif->ionic->dev_cmd_lock); 1836 ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index); 1837 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 1838 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 1839 mutex_unlock(&lif->ionic->dev_cmd_lock); 1840 if (err) { 1841 netdev_err(lif->netdev, "adminq init failed %d\n", err); 1842 return err; 1843 } 1844 1845 q->hw_type = comp.hw_type; 1846 q->hw_index = le32_to_cpu(comp.hw_index); 1847 q->dbval = IONIC_DBELL_QID(q->hw_index); 1848 1849 dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type); 1850 dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index); 1851 1852 netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi, 1853 NAPI_POLL_WEIGHT); 1854 1855 err = ionic_request_irq(lif, qcq); 1856 if (err) { 1857 netdev_warn(lif->netdev, "adminq irq request failed %d\n", err); 1858 netif_napi_del(&qcq->napi); 1859 return err; 1860 } 1861 1862 napi_enable(&qcq->napi); 1863 1864 if (qcq->flags & IONIC_QCQ_F_INTR) 1865 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 1866 IONIC_INTR_MASK_CLEAR); 1867 1868 qcq->flags |= IONIC_QCQ_F_INITED; 1869 1870 ionic_debugfs_add_qcq(lif, qcq); 1871 1872 return 0; 1873 } 1874 1875 static int ionic_lif_notifyq_init(struct ionic_lif *lif) 1876 { 1877 struct ionic_qcq *qcq = lif->notifyqcq; 1878 struct device *dev = lif->ionic->dev; 1879 struct ionic_queue *q = &qcq->q; 1880 int err; 1881 1882 struct ionic_admin_ctx ctx = { 1883 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1884 .cmd.q_init = { 1885 .opcode = IONIC_CMD_Q_INIT, 1886 .lif_index = cpu_to_le16(lif->index), 1887 .type = q->type, 1888 .index = cpu_to_le32(q->index), 1889 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 1890 IONIC_QINIT_F_ENA), 1891 .intr_index = cpu_to_le16(lif->adminqcq->intr.index), 1892 .pid = cpu_to_le16(q->pid), 1893 .ring_size = ilog2(q->num_descs), 1894 .ring_base = cpu_to_le64(q->base_pa), 1895 } 1896 }; 1897 1898 dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid); 1899 dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index); 1900 dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 1901 dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 1902 1903 err = ionic_adminq_post_wait(lif, &ctx); 1904 if (err) 1905 return err; 1906 1907 q->hw_type = ctx.comp.q_init.hw_type; 1908 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 1909 q->dbval = IONIC_DBELL_QID(q->hw_index); 1910 1911 dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type); 1912 dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index); 1913 1914 /* preset the callback info */ 1915 q->info[0].cb_arg = lif; 1916 1917 qcq->flags |= IONIC_QCQ_F_INITED; 1918 1919 ionic_debugfs_add_qcq(lif, qcq); 1920 1921 return 0; 1922 } 1923 1924 static int ionic_station_set(struct ionic_lif *lif) 1925 { 1926 struct net_device *netdev = lif->netdev; 1927 struct ionic_admin_ctx ctx = { 1928 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1929 .cmd.lif_getattr = { 1930 .opcode = IONIC_CMD_LIF_GETATTR, 1931 .index = cpu_to_le16(lif->index), 1932 .attr = IONIC_LIF_ATTR_MAC, 1933 }, 1934 }; 1935 struct sockaddr addr; 1936 int err; 1937 1938 err = ionic_adminq_post_wait(lif, &ctx); 1939 if (err) 1940 return err; 1941 1942 memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len); 1943 addr.sa_family = AF_INET; 1944 err = eth_prepare_mac_addr_change(netdev, &addr); 1945 if (err) 1946 return err; 1947 1948 if (!is_zero_ether_addr(netdev->dev_addr)) { 1949 netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n", 1950 netdev->dev_addr); 1951 ionic_lif_addr(lif, netdev->dev_addr, false); 1952 } 1953 1954 eth_commit_mac_addr_change(netdev, &addr); 1955 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", 1956 netdev->dev_addr); 1957 ionic_lif_addr(lif, netdev->dev_addr, true); 1958 1959 return 0; 1960 } 1961 1962 static int ionic_lif_init(struct ionic_lif *lif) 1963 { 1964 struct ionic_dev *idev = &lif->ionic->idev; 1965 struct device *dev = lif->ionic->dev; 1966 struct ionic_lif_init_comp comp; 1967 int dbpage_num; 1968 int err; 1969 1970 ionic_debugfs_add_lif(lif); 1971 1972 mutex_lock(&lif->ionic->dev_cmd_lock); 1973 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa); 1974 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 1975 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 1976 mutex_unlock(&lif->ionic->dev_cmd_lock); 1977 if (err) 1978 return err; 1979 1980 lif->hw_index = le16_to_cpu(comp.hw_index); 1981 1982 /* now that we have the hw_index we can figure out our doorbell page */ 1983 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); 1984 if (!lif->dbid_count) { 1985 dev_err(dev, "No doorbell pages, aborting\n"); 1986 return -EINVAL; 1987 } 1988 1989 lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL); 1990 if (!lif->dbid_inuse) { 1991 dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n"); 1992 return -ENOMEM; 1993 } 1994 1995 /* first doorbell id reserved for kernel (dbid aka pid == zero) */ 1996 set_bit(0, lif->dbid_inuse); 1997 lif->kern_pid = 0; 1998 1999 dbpage_num = ionic_db_page_num(lif, lif->kern_pid); 2000 lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num); 2001 if (!lif->kern_dbpage) { 2002 dev_err(dev, "Cannot map dbpage, aborting\n"); 2003 err = -ENOMEM; 2004 goto err_out_free_dbid; 2005 } 2006 2007 err = ionic_lif_adminq_init(lif); 2008 if (err) 2009 goto err_out_adminq_deinit; 2010 2011 if (lif->ionic->nnqs_per_lif) { 2012 err = ionic_lif_notifyq_init(lif); 2013 if (err) 2014 goto err_out_notifyq_deinit; 2015 } 2016 2017 err = ionic_init_nic_features(lif); 2018 if (err) 2019 goto err_out_notifyq_deinit; 2020 2021 err = ionic_rx_filters_init(lif); 2022 if (err) 2023 goto err_out_notifyq_deinit; 2024 2025 err = ionic_station_set(lif); 2026 if (err) 2027 goto err_out_notifyq_deinit; 2028 2029 lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT; 2030 2031 set_bit(IONIC_LIF_INITED, lif->state); 2032 2033 INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work); 2034 2035 return 0; 2036 2037 err_out_notifyq_deinit: 2038 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 2039 err_out_adminq_deinit: 2040 ionic_lif_qcq_deinit(lif, lif->adminqcq); 2041 ionic_lif_reset(lif); 2042 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 2043 lif->kern_dbpage = NULL; 2044 err_out_free_dbid: 2045 kfree(lif->dbid_inuse); 2046 lif->dbid_inuse = NULL; 2047 2048 return err; 2049 } 2050 2051 int ionic_lifs_init(struct ionic *ionic) 2052 { 2053 struct list_head *cur, *tmp; 2054 struct ionic_lif *lif; 2055 int err; 2056 2057 list_for_each_safe(cur, tmp, &ionic->lifs) { 2058 lif = list_entry(cur, struct ionic_lif, list); 2059 err = ionic_lif_init(lif); 2060 if (err) 2061 return err; 2062 } 2063 2064 return 0; 2065 } 2066 2067 static void ionic_lif_notify_work(struct work_struct *ws) 2068 { 2069 } 2070 2071 static void ionic_lif_set_netdev_info(struct ionic_lif *lif) 2072 { 2073 struct ionic_admin_ctx ctx = { 2074 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 2075 .cmd.lif_setattr = { 2076 .opcode = IONIC_CMD_LIF_SETATTR, 2077 .index = cpu_to_le16(lif->index), 2078 .attr = IONIC_LIF_ATTR_NAME, 2079 }, 2080 }; 2081 2082 strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name, 2083 sizeof(ctx.cmd.lif_setattr.name)); 2084 2085 ionic_adminq_post_wait(lif, &ctx); 2086 } 2087 2088 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev) 2089 { 2090 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit) 2091 return NULL; 2092 2093 return netdev_priv(netdev); 2094 } 2095 2096 static int ionic_lif_notify(struct notifier_block *nb, 2097 unsigned long event, void *info) 2098 { 2099 struct net_device *ndev = netdev_notifier_info_to_dev(info); 2100 struct ionic *ionic = container_of(nb, struct ionic, nb); 2101 struct ionic_lif *lif = ionic_netdev_lif(ndev); 2102 2103 if (!lif || lif->ionic != ionic) 2104 return NOTIFY_DONE; 2105 2106 switch (event) { 2107 case NETDEV_CHANGENAME: 2108 ionic_lif_set_netdev_info(lif); 2109 break; 2110 } 2111 2112 return NOTIFY_DONE; 2113 } 2114 2115 int ionic_lifs_register(struct ionic *ionic) 2116 { 2117 int err; 2118 2119 INIT_WORK(&ionic->nb_work, ionic_lif_notify_work); 2120 2121 ionic->nb.notifier_call = ionic_lif_notify; 2122 2123 err = register_netdevice_notifier(&ionic->nb); 2124 if (err) 2125 ionic->nb.notifier_call = NULL; 2126 2127 /* only register LIF0 for now */ 2128 err = register_netdev(ionic->master_lif->netdev); 2129 if (err) { 2130 dev_err(ionic->dev, "Cannot register net device, aborting\n"); 2131 return err; 2132 } 2133 2134 ionic_link_status_check_request(ionic->master_lif); 2135 ionic->master_lif->registered = true; 2136 2137 return 0; 2138 } 2139 2140 void ionic_lifs_unregister(struct ionic *ionic) 2141 { 2142 if (ionic->nb.notifier_call) { 2143 unregister_netdevice_notifier(&ionic->nb); 2144 cancel_work_sync(&ionic->nb_work); 2145 ionic->nb.notifier_call = NULL; 2146 } 2147 2148 /* There is only one lif ever registered in the 2149 * current model, so don't bother searching the 2150 * ionic->lif for candidates to unregister 2151 */ 2152 cancel_work_sync(&ionic->master_lif->deferred.work); 2153 cancel_work_sync(&ionic->master_lif->tx_timeout_work); 2154 if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED) 2155 unregister_netdev(ionic->master_lif->netdev); 2156 } 2157 2158 int ionic_lif_identify(struct ionic *ionic, u8 lif_type, 2159 union ionic_lif_identity *lid) 2160 { 2161 struct ionic_dev *idev = &ionic->idev; 2162 size_t sz; 2163 int err; 2164 2165 sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data)); 2166 2167 mutex_lock(&ionic->dev_cmd_lock); 2168 ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1); 2169 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 2170 memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz); 2171 mutex_unlock(&ionic->dev_cmd_lock); 2172 if (err) 2173 return (err); 2174 2175 dev_dbg(ionic->dev, "capabilities 0x%llx\n", 2176 le64_to_cpu(lid->capabilities)); 2177 2178 dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n", 2179 le32_to_cpu(lid->eth.max_ucast_filters)); 2180 dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n", 2181 le32_to_cpu(lid->eth.max_mcast_filters)); 2182 dev_dbg(ionic->dev, "eth.features 0x%llx\n", 2183 le64_to_cpu(lid->eth.config.features)); 2184 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n", 2185 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ])); 2186 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n", 2187 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ])); 2188 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n", 2189 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ])); 2190 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n", 2191 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ])); 2192 dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name); 2193 dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac); 2194 dev_dbg(ionic->dev, "eth.config.mtu %d\n", 2195 le32_to_cpu(lid->eth.config.mtu)); 2196 2197 return 0; 2198 } 2199 2200 int ionic_lifs_size(struct ionic *ionic) 2201 { 2202 struct ionic_identity *ident = &ionic->ident; 2203 unsigned int nintrs, dev_nintrs; 2204 union ionic_lif_config *lc; 2205 unsigned int ntxqs_per_lif; 2206 unsigned int nrxqs_per_lif; 2207 unsigned int neqs_per_lif; 2208 unsigned int nnqs_per_lif; 2209 unsigned int nxqs, neqs; 2210 unsigned int min_intrs; 2211 int err; 2212 2213 lc = &ident->lif.eth.config; 2214 dev_nintrs = le32_to_cpu(ident->dev.nintrs); 2215 neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count); 2216 nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]); 2217 ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]); 2218 nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]); 2219 2220 nxqs = min(ntxqs_per_lif, nrxqs_per_lif); 2221 nxqs = min(nxqs, num_online_cpus()); 2222 neqs = min(neqs_per_lif, num_online_cpus()); 2223 2224 try_again: 2225 /* interrupt usage: 2226 * 1 for master lif adminq/notifyq 2227 * 1 for each CPU for master lif TxRx queue pairs 2228 * whatever's left is for RDMA queues 2229 */ 2230 nintrs = 1 + nxqs + neqs; 2231 min_intrs = 2; /* adminq + 1 TxRx queue pair */ 2232 2233 if (nintrs > dev_nintrs) 2234 goto try_fewer; 2235 2236 err = ionic_bus_alloc_irq_vectors(ionic, nintrs); 2237 if (err < 0 && err != -ENOSPC) { 2238 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err); 2239 return err; 2240 } 2241 if (err == -ENOSPC) 2242 goto try_fewer; 2243 2244 if (err != nintrs) { 2245 ionic_bus_free_irq_vectors(ionic); 2246 goto try_fewer; 2247 } 2248 2249 ionic->nnqs_per_lif = nnqs_per_lif; 2250 ionic->neqs_per_lif = neqs; 2251 ionic->ntxqs_per_lif = nxqs; 2252 ionic->nrxqs_per_lif = nxqs; 2253 ionic->nintrs = nintrs; 2254 2255 ionic_debugfs_add_sizes(ionic); 2256 2257 return 0; 2258 2259 try_fewer: 2260 if (nnqs_per_lif > 1) { 2261 nnqs_per_lif >>= 1; 2262 goto try_again; 2263 } 2264 if (neqs > 1) { 2265 neqs >>= 1; 2266 goto try_again; 2267 } 2268 if (nxqs > 1) { 2269 nxqs >>= 1; 2270 goto try_again; 2271 } 2272 dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs); 2273 return -ENOSPC; 2274 } 2275