// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

/* queuetype support level */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,	/* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,	/* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 0,	/* 0 = Base version with CQ+SG support */
	[IONIC_QTYPE_TXQ]     = 1,	/* 0 = Base version with CQ+SG support
					 * 1 = ... with Tx SG version 1
					 */
};

static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
static void ionic_link_status_check(struct ionic_lif *lif);
static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);

static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);

static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	spin_lock_bh(&def->lock);
	if (!list_empty(&def->list)) {
		w = list_first_entry(&def->list,
				     struct ionic_deferred_work, list);
		list_del(&w->list);
	}
	spin_unlock_bh(&def->lock);

	if (w) {
		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif, w->rx_mode);
			break;
		case IONIC_DW_TYPE_RX_ADDR_ADD:
			ionic_lif_addr_add(lif, w->addr);
			break;
		case IONIC_DW_TYPE_RX_ADDR_DEL:
			ionic_lif_addr_del(lif, w->addr);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			if (w->fw_status)
				ionic_lif_handle_fw_up(lif);
			else
				ionic_lif_handle_fw_down(lif);
			break;
		default:
			break;
		}
		kfree(w);
		schedule_work(&def->work);
	}
}

void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
				struct ionic_deferred_work *work)
{
	spin_lock_bh(&def->lock);
	list_add_tail(&work->list, &def->list);
	spin_unlock_bh(&def->lock);
	schedule_work(&def->work);
}

static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			u32 link_speed;

			ionic_port_identify(lif->ionic);
			link_speed = le32_to_cpu(lif->info->status.link_speed);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    link_speed / 1000);
			netif_carrier_on(netdev);
		}

		if (lif->netdev->flags & IFF_UP &&
		    netif_running(lif->netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_start_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}

void ionic_link_status_check_request(struct ionic_lif *lif)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (in_interrupt()) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return;

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_link_status_check(lif);
	}
}

static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = lif->netdev->name;
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs) {
		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
			    __func__, index, ionic->nintrs);
		return -ENOSPC;
	}

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	return 0;
}

static void ionic_intr_free(struct ionic *ionic, int index)
{
	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
		clear_bit(index, ionic->intrs);
}

static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		napi_enable(&qcq->napi);
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_qcq_disable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_DISABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	if (!qcq)
		return;

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
	qcq->base = NULL;
	qcq->base_pa = 0;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
		qcq->intr.vector = 0;
		ionic_intr_free(lif->ionic, qcq->intr.index);
	}

	devm_kfree(dev, qcq->cq.info);
	qcq->cq.info = NULL;
	devm_kfree(dev, qcq->q.info);
	qcq->q.info = NULL;
	devm_kfree(dev, qcq);
}

static void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int i;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		ionic_qcq_free(lif, lif->adminqcq);
		lif->adminqcq = NULL;
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			if (lif->rxqcqs[i].stats)
				devm_kfree(dev, lif->rxqcqs[i].stats);
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			if (lif->txqcqs[i].stats)
				devm_kfree(dev, lif->txqcqs[i].stats);
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}

static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
		ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
	}

	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
}

static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int pid, struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	u32 q_size, cq_size, sg_size, total_size;
	struct device *dev = lif->ionic->dev;
	void *q_base, *cq_base, *sg_base;
	dma_addr_t cq_base_pa = 0;
	dma_addr_t sg_base_pa = 0;
	dma_addr_t q_base_pa = 0;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	q_size = num_descs * desc_size;
	cq_size = num_descs * cq_desc_size;
	sg_size = num_descs * sg_desc_size;

	total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
	/* Note: aligning q_size/cq_size is not enough due to cq_base
	 * address aligning as q_base could be not aligned to the page.
	 * Adding PAGE_SIZE.
	 */
	total_size += PAGE_SIZE;
	if (flags & IONIC_QCQ_F_SG) {
		total_size += ALIGN(sg_size, PAGE_SIZE);
		total_size += PAGE_SIZE;
	}

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->flags = flags;

	new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
				   GFP_KERNEL);
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->q.type = type;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out;
	}

	if (flags & IONIC_QCQ_F_INTR) {
		err = ionic_intr_alloc(lif, &new->intr);
		if (err) {
			netdev_warn(lif->netdev, "no intr for %s: %d\n",
				    name, err);
			goto err_out;
		}

		err = ionic_bus_get_irq(lif->ionic, new->intr.index);
		if (err < 0) {
			netdev_warn(lif->netdev, "no vector for %s: %d\n",
				    name, err);
			goto err_out_free_intr;
		}
		new->intr.vector = err;
		ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
				       IONIC_INTR_MASK_SET);

		err = ionic_request_irq(lif, new);
		if (err) {
			netdev_warn(lif->netdev, "irq request failed %d\n", err);
			goto err_out_free_intr;
		}

		new->intr.cpu = cpumask_local_spread(new->intr.index,
						     dev_to_node(dev));
		if (new->intr.cpu != -1)
			cpumask_set_cpu(new->intr.cpu,
					&new->intr.affinity_mask);
	} else {
		new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
	}

	new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
				    GFP_KERNEL);
	if (!new->cq.info) {
		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_irq;
	}

	new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
				       GFP_KERNEL);
	if (!new->base) {
		netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	new->total_size = total_size;

	q_base = new->base;
	q_base_pa = new->base_pa;

	cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
	cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);

	if (flags & IONIC_QCQ_F_SG) {
		sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
					PAGE_SIZE);
		sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	ionic_q_map(&new->q, q_base, q_base_pa);
	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
	ionic_cq_bind(&new->cq, &new->q);

	*qcq = new;

	return 0;

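	/* error unwind: the irq and the intr slot only exist when
	 * IONIC_QCQ_F_INTR was requested, hence the flag checks below
	 */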
err_out_free_irq:
	if (flags & IONIC_QCQ_F_INTR)
		devm_free_irq(dev, new->intr.vector, &new->napi);
err_out_free_intr:
	if (flags & IONIC_QCQ_F_INTR)
		ionic_intr_free(lif->ionic, new->intr.index);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int q_list_size;
	unsigned int flags;
	int err;
	int i;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0, lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0, lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out_free_adminqcq;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
	err = -ENOMEM;
	lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out_free_notifyqcq;
	for (i = 0; i < lif->nxqs; i++) {
		lif->txqcqs[i].stats = devm_kzalloc(dev,
						    sizeof(struct ionic_q_stats),
						    GFP_KERNEL);
		if (!lif->txqcqs[i].stats)
			goto err_out_free_tx_stats;
	}

	lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out_free_tx_stats;
	for (i = 0; i < lif->nxqs; i++) {
		lif->rxqcqs[i].stats = devm_kzalloc(dev,
						    sizeof(struct ionic_q_stats),
						    GFP_KERNEL);
		if (!lif->rxqcqs[i].stats)
			goto err_out_free_rx_stats;
	}

	return 0;

err_out_free_rx_stats:
	for (i = 0; i < lif->nxqs; i++)
		if (lif->rxqcqs[i].stats)
			devm_kfree(dev, lif->rxqcqs[i].stats);
	devm_kfree(dev, lif->rxqcqs);
	lif->rxqcqs = NULL;
err_out_free_tx_stats:
	for (i = 0; i < lif->nxqs; i++)
		if (lif->txqcqs[i].stats)
			devm_kfree(dev, lif->txqcqs[i].stats);
	devm_kfree(dev, lif->txqcqs);
	lif->txqcqs = NULL;
err_out_free_notifyqcq:
	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}
err_out_free_adminqcq:
	ionic_qcq_free(lif, lif->adminqcq);
	lif->adminqcq = NULL;

	return err;
}

static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
		},
	};
	int err;

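	/* Note that the Tx queue has no interrupt of its own: intr_index
	 * above points at the partner Rx queue's interrupt, which the Tx
	 * qcq shares via ionic_link_qcq_interrupts() in ionic_txrx_alloc().
	 */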
	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);

	q->tail = q->info;
	q->head = q->tail;
	cq->tail = cq->info;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
		},
	};
	int err;

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);

	q->tail = q->info;
	q->head = q->tail;
	cq->tail = cq->info;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
		       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static bool ionic_notifyq_service(struct ionic_cq *cq,
				  struct ionic_cq_info *cq_info)
{
	union ionic_notifyq_comp *comp = cq_info->cq_desc;
	struct ionic_deferred_work *work;
	struct net_device *netdev;
	struct ionic_queue *q;
	struct ionic_lif *lif;
	u64 eid;

	q = cq->bound_q;
	lif = q->info[0].cb_arg;
	netdev = lif->netdev;
	eid = le64_to_cpu(comp->event.eid);

	/* Have we run out of new completions to process?
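	 * The NotifyQ carries a monotonically increasing event id (eid)
	 * rather than a color bit; the signed 64-bit difference below
	 * rejects stale or replayed events and tolerates eid wrap.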
	 */
	if ((s64)(eid - lif->last_eid) <= 0)
		return false;

	lif->last_eid = eid;

	dev_dbg(lif->ionic->dev, "notifyq event:\n");
	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	switch (le16_to_cpu(comp->event.ecode)) {
	case IONIC_EVENT_LINK_CHANGE:
		ionic_link_status_check_request(lif);
		break;
	case IONIC_EVENT_RESET:
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
		} else {
			work->type = IONIC_DW_TYPE_LIF_RESET;
			ionic_lif_deferred_enqueue(&lif->deferred, work);
		}
		break;
	default:
		netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
			    comp->event.ecode, eid);
		break;
	}

	return true;
}

static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct ionic_cq *cq = &lif->notifyqcq->cq;
	u32 work_done;

	work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
				     NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);

	return work_done;
}

static bool ionic_adminq_service(struct ionic_cq *cq,
				 struct ionic_cq_info *cq_info)
{
	struct ionic_admin_comp *comp = cq_info->cq_desc;

	if (!color_match(comp->color, cq->done_color))
		return false;

	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));

	return true;
}

static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	int n_work = 0;
	int a_work = 0;

	if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
		n_work = ionic_notifyq_clean(lif, budget);
	a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);

	return max(n_work, a_work);
}

void ionic_get_stats64(struct net_device *netdev,
		       struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}

static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	/* don't bother if we already have it */
	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	spin_unlock_bh(&lif->rx_filters.lock);
	if (f)
		return 0;

	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);

	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
		   addr, f->filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	return 0;
}

static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
{
	struct ionic *ionic = lif->ionic;
	struct ionic_deferred_work *work;
	unsigned int nmfilters;
	unsigned int nufilters;

	if (add) {
		/* Do we have space for this filter?  We test the counters
		 * here before checking the need for deferral so that we
		 * can return an overflow error to the stack.
		 */
		nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
		nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);

		if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
			lif->nmcast++;
		else if (!is_multicast_ether_addr(addr) &&
			 lif->nucast < nufilters)
			lif->nucast++;
		else
			return -ENOSPC;
	} else {
		if (is_multicast_ether_addr(addr) && lif->nmcast)
			lif->nmcast--;
		else if (!is_multicast_ether_addr(addr) && lif->nucast)
			lif->nucast--;
	}

	if (in_interrupt()) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
			return -ENOMEM;
		}
		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
				   IONIC_DW_TYPE_RX_ADDR_DEL;
		memcpy(work->addr, addr, ETH_ALEN);
		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
"add" : "del", addr); 959 if (add) 960 return ionic_lif_addr_add(lif, addr); 961 else 962 return ionic_lif_addr_del(lif, addr); 963 } 964 965 return 0; 966 } 967 968 static int ionic_addr_add(struct net_device *netdev, const u8 *addr) 969 { 970 return ionic_lif_addr(netdev_priv(netdev), addr, true); 971 } 972 973 static int ionic_addr_del(struct net_device *netdev, const u8 *addr) 974 { 975 return ionic_lif_addr(netdev_priv(netdev), addr, false); 976 } 977 978 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) 979 { 980 struct ionic_admin_ctx ctx = { 981 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 982 .cmd.rx_mode_set = { 983 .opcode = IONIC_CMD_RX_MODE_SET, 984 .lif_index = cpu_to_le16(lif->index), 985 .rx_mode = cpu_to_le16(rx_mode), 986 }, 987 }; 988 char buf[128]; 989 int err; 990 int i; 991 #define REMAIN(__x) (sizeof(buf) - (__x)) 992 993 i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", 994 lif->rx_mode, rx_mode); 995 if (rx_mode & IONIC_RX_MODE_F_UNICAST) 996 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); 997 if (rx_mode & IONIC_RX_MODE_F_MULTICAST) 998 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); 999 if (rx_mode & IONIC_RX_MODE_F_BROADCAST) 1000 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); 1001 if (rx_mode & IONIC_RX_MODE_F_PROMISC) 1002 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); 1003 if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) 1004 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); 1005 netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf); 1006 1007 err = ionic_adminq_post_wait(lif, &ctx); 1008 if (err) 1009 netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n", 1010 rx_mode, err); 1011 else 1012 lif->rx_mode = rx_mode; 1013 } 1014 1015 static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) 1016 { 1017 struct ionic_deferred_work *work; 1018 1019 if (in_interrupt()) { 1020 work = kzalloc(sizeof(*work), GFP_ATOMIC); 1021 if (!work) { 1022 netdev_err(lif->netdev, "%s OOM\n", __func__); 1023 return; 1024 } 1025 work->type = IONIC_DW_TYPE_RX_MODE; 1026 work->rx_mode = rx_mode; 1027 netdev_dbg(lif->netdev, "deferred: rx_mode\n"); 1028 ionic_lif_deferred_enqueue(&lif->deferred, work); 1029 } else { 1030 ionic_lif_rx_mode(lif, rx_mode); 1031 } 1032 } 1033 1034 static void ionic_set_rx_mode(struct net_device *netdev) 1035 { 1036 struct ionic_lif *lif = netdev_priv(netdev); 1037 struct ionic_identity *ident; 1038 unsigned int nfilters; 1039 unsigned int rx_mode; 1040 1041 ident = &lif->ionic->ident; 1042 1043 rx_mode = IONIC_RX_MODE_F_UNICAST; 1044 rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0; 1045 rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0; 1046 rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; 1047 rx_mode |= (netdev->flags & IFF_ALLMULTI) ? 

	/* sync unicast addresses
	 * next check to see if we're in an overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
	if (netdev_uc_count(netdev) + 1 > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		lif->uc_overflow = true;
	} else if (lif->uc_overflow) {
		lif->uc_overflow = false;
		if (!(netdev->flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
	}

	/* same for multicast */
	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
	nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
	if (netdev_mc_count(netdev) > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
		lif->mc_overflow = true;
	} else if (lif->mc_overflow) {
		lif->mc_overflow = false;
		if (!(netdev->flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	if (lif->rx_mode != rx_mode)
		_ionic_lif_rx_mode(lif, rx_mode);
}

static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}

static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);
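	/* hw_features ends up as the intersection of the features we asked
	 * for and the features the device reports back as supported
	 */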

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & features) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");

	return 0;
}

static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_RXHASH |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_addr_del(netdev, netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_addr_add(netdev, mac);
}

static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	netdev->mtu = new_mtu;
	err = ionic_reset_queues(lif, NULL, NULL);

	return err;
}

static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);

	netdev_info(lif->netdev, "Tx Timeout recovery\n");

	rtnl_lock();
	ionic_reset_queues(lif, NULL, NULL);
	rtnl_unlock();
}

static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	schedule_work(&lif->tx_timeout_work);
}

static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
			.vlan.vlan = cpu_to_le16(vid),
		},
	};
	int err;

	netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_vlan(lif, vid);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
		   vid, f->filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	return ionic_adminq_post_wait(lif, &ctx);
}

int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_lif_rss_init(struct ionic_lif *lif)
{
	unsigned int tbl_sz;
	unsigned int i;

	lif->rss_types = IONIC_RSS_TYPE_IPV4 |
			 IONIC_RSS_TYPE_IPV4_TCP |
			 IONIC_RSS_TYPE_IPV4_UDP |
			 IONIC_RSS_TYPE_IPV6 |
			 IONIC_RSS_TYPE_IPV6_TCP |
			 IONIC_RSS_TYPE_IPV6_UDP;

	/* Fill indirection table with 'default' values */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
}

static void ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	int tbl_sz;

	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	memset(lif->rss_ind_tbl, 0, tbl_sz);
	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}

static void ionic_txrx_disable(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++) {
			err = ionic_qcq_disable(lif->txqcqs[i].qcq);
			if (err == -ETIMEDOUT)
				break;
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++) {
			err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
			if (err == -ETIMEDOUT)
				break;
		}
	}
}

static void ionic_txrx_deinit(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
			ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
			ionic_tx_empty(&lif->txqcqs[i].qcq->q);
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++) {
			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
			ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
			ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
		}
	}
	lif->rx_mode = 0;
}

static void ionic_txrx_free(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++) {
			ionic_qcq_free(lif, lif->txqcqs[i].qcq);
			lif->txqcqs[i].qcq = NULL;
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++) {
			ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
			lif->rxqcqs[i].qcq = NULL;
		}
	}
}

static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int sg_desc_sz;
	unsigned int flags;
	unsigned int i;
	int err = 0;

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      lif->ntxq_descs,
				      sizeof(struct ionic_txq_desc),
				      sizeof(struct ionic_txq_comp),
				      sg_desc_sz,
				      lif->kern_pid, &lif->txqcqs[i].qcq);
		if (err)
			goto err_out;

		lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
		ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq);
	}

	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
				      lif->nrxq_descs,
				      sizeof(struct ionic_rxq_desc),
				      sizeof(struct ionic_rxq_comp),
				      sizeof(struct ionic_rxq_sg_desc),
				      lif->kern_pid, &lif->rxqcqs[i].qcq);
		if (err)
			goto err_out;

		lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[i].qcq->intr.index,
				     lif->rx_coalesce_hw);
		ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
					  lif->txqcqs[i].qcq);
		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq);
	}

	return 0;

err_out:
	ionic_txrx_free(lif);

	return err;
}

static int ionic_txrx_init(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
		if (err)
			goto err_out;

		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
		if (err) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
			goto err_out;
		}
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_init(lif);

	ionic_set_rx_mode(lif->netdev);

	return 0;

err_out:
	while (i--) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
	}

	return err;
}

static int ionic_txrx_enable(struct ionic_lif *lif)
{
	int i, err;

	for (i = 0; i < lif->nxqs; i++) {
		ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
		err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
		if (err)
			goto err_out;

		err = ionic_qcq_enable(lif->txqcqs[i].qcq);
		if (err) {
			if (err != -ETIMEDOUT)
				ionic_qcq_disable(lif->rxqcqs[i].qcq);
			goto err_out;
		}
	}

	return 0;

err_out:
	while (i--) {
		err = ionic_qcq_disable(lif->txqcqs[i].qcq);
		if (err == -ETIMEDOUT)
			break;
		err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
		if (err == -ETIMEDOUT)
			break;
	}

	return err;
}

static int ionic_start_queues(struct ionic_lif *lif)
{
	int err;

	if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
		return 0;

	err = ionic_txrx_enable(lif);
	if (err) {
		clear_bit(IONIC_LIF_F_UP, lif->state);
		return err;
	}
	netif_tx_wake_all_queues(lif->netdev);

	return 0;
}

int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_txrx_alloc(lif);
	if (err)
		return err;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_out;

	err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	/* don't start the queues until we have link */
	if (netif_carrier_ok(netdev)) {
		err = ionic_start_queues(lif);
		if (err)
			goto err_txrx_deinit;
	}

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_out:
	ionic_txrx_free(lif);
	return err;
}

static void ionic_stop_queues(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
		return;

	netif_tx_disable(lif->netdev);
	ionic_txrx_disable(lif);
}

int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return 0;

	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);

	return 0;
}

static int ionic_get_vf_config(struct net_device *netdev,
			       int vf, struct ifla_vf_info *ivf)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ivf->vf = vf;
		ivf->vlan = ionic->vfs[vf].vlanid;
		ivf->qos = 0;
		ivf->spoofchk = ionic->vfs[vf].spoofchk;
		ivf->linkstate = ionic->vfs[vf].linkstate;
		ivf->max_tx_rate = ionic->vfs[vf].maxrate;
		ivf->trusted = ionic->vfs[vf].trusted;
		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}

static int ionic_get_vf_stats(struct net_device *netdev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	struct ionic_lif_stats *vs;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		memset(vf_stats, 0, sizeof(*vf_stats));
		vs = &ionic->vfs[vf].stats;

		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
		vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes);
		vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes);
		vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets);
		vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets);
		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
				       le64_to_cpu(vs->rx_mcast_drop_packets) +
				       le64_to_cpu(vs->rx_bcast_drop_packets);
		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
				       le64_to_cpu(vs->tx_mcast_drop_packets) +
				       le64_to_cpu(vs->tx_bcast_drop_packets);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
		if (!ret)
			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
			     u8 qos, __be16 proto)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* until someday when we support qos */
	if (qos)
		return -EINVAL;

	if (vlan > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
		if (!ret)
			ionic->vfs[vf].vlanid = vlan;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_rate(struct net_device *netdev, int vf,
			     int tx_min, int tx_max)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* setting the min just seems silly */
	if (tx_min)
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
		if (!ret)
			lif->ionic->vfs[vf].maxrate = tx_max;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data = set;	/* convert to u8 for config */
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_SPOOFCHK, &data);
		if (!ret)
			ionic->vfs[vf].spoofchk = data;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data = set;	/* convert to u8 for config */
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_TRUST, &data);
		if (!ret)
			ionic->vfs[vf].trusted = data;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data;
	int ret;

	switch (set) {
	case IFLA_VF_LINK_STATE_ENABLE:
		data = IONIC_VF_LINK_STATUS_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		data = IONIC_VF_LINK_STATUS_DOWN;
		break;
	case IFLA_VF_LINK_STATE_AUTO:
		data = IONIC_VF_LINK_STATUS_AUTO;
		break;
	default:
		return -EINVAL;
	}

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_LINKSTATE, &data);
		if (!ret)
			ionic->vfs[vf].linkstate = set;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static const struct net_device_ops ionic_netdev_ops = {
	.ndo_open               = ionic_open,
	.ndo_stop               = ionic_stop,
	.ndo_start_xmit		= ionic_start_xmit,
	.ndo_get_stats64	= ionic_get_stats64,
	.ndo_set_rx_mode	= ionic_set_rx_mode,
	.ndo_set_features	= ionic_set_features,
	.ndo_set_mac_address	= ionic_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout         = ionic_tx_timeout,
	.ndo_change_mtu         = ionic_change_mtu,
	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= ionic_set_vf_vlan,
	.ndo_set_vf_trust	= ionic_set_vf_trust,
	.ndo_set_vf_mac		= ionic_set_vf_mac,
	.ndo_set_vf_rate	= ionic_set_vf_rate,
	.ndo_set_vf_spoofchk	= ionic_set_vf_spoofchk,
	.ndo_get_vf_config	= ionic_get_vf_config,
	.ndo_set_vf_link_state	= ionic_set_vf_link_state,
	.ndo_get_vf_stats       = ionic_get_vf_stats,
};

int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
{
	bool running;
	int err = 0;

	mutex_lock(&lif->queue_lock);
	running = netif_running(lif->netdev);
	if (running) {
		netif_device_detach(lif->netdev);
		err = ionic_stop(lif->netdev);
		if (err)
			goto reset_out;
	}

	if (cb)
		cb(lif, arg);

	if (running) {
		err = ionic_open(lif->netdev);
		netif_device_attach(lif->netdev);
	}

reset_out:
	mutex_unlock(&lif->queue_lock);

	return err;
}

static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
{
	struct device *dev = ionic->dev;
	union ionic_lif_identity *lid;
	struct net_device *netdev;
static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
{
	struct device *dev = ionic->dev;
	union ionic_lif_identity *lid;
	struct net_device *netdev;
	struct ionic_lif *lif;
	int tbl_sz;
	int err;

	lid = kzalloc(sizeof(*lid), GFP_KERNEL);
	if (!lid)
		return ERR_PTR(-ENOMEM);

	netdev = alloc_etherdev_mqs(sizeof(*lif),
				    ionic->ntxqs_per_lif, ionic->nrxqs_per_lif);
	if (!netdev) {
		dev_err(dev, "Cannot allocate netdev, aborting\n");
		err = -ENOMEM;
		goto err_out_free_lid;
	}

	SET_NETDEV_DEV(netdev, dev);

	lif = netdev_priv(netdev);
	lif->netdev = netdev;
	ionic->master_lif = lif;
	netdev->netdev_ops = &ionic_netdev_ops;
	ionic_ethtool_set_ops(netdev);

	netdev->watchdog_timeo = 2 * HZ;
	netif_carrier_off(netdev);

	lif->identity = lid;
	lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
	ionic_lif_identify(ionic, lif->lif_type, lif->identity);
	lif->netdev->min_mtu = le32_to_cpu(lif->identity->eth.min_frame_size);
	lif->netdev->max_mtu =
		le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;

	lif->neqs = ionic->neqs_per_lif;
	lif->nxqs = ionic->ntxqs_per_lif;

	lif->ionic = ionic;
	lif->index = index;
	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;

	/* Convert the default coalesce value to actual hw resolution */
	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
						    lif->rx_coalesce_usecs);

	snprintf(lif->name, sizeof(lif->name), "lif%u", index);

	spin_lock_init(&lif->adminq_lock);

	spin_lock_init(&lif->deferred.lock);
	INIT_LIST_HEAD(&lif->deferred.list);
	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);

	/* allocate lif info */
	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
	lif->info = dma_alloc_coherent(dev, lif->info_sz,
				       &lif->info_pa, GFP_KERNEL);
	if (!lif->info) {
		dev_err(dev, "Failed to allocate lif info, aborting\n");
		err = -ENOMEM;
		goto err_out_free_netdev;
	}

	ionic_debugfs_add_lif(lif);

	/* allocate queues */
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out_free_lif_info;

	/* allocate rss indirection table */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
					      &lif->rss_ind_tbl_pa,
					      GFP_KERNEL);
	if (!lif->rss_ind_tbl) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
		goto err_out_free_qcqs;
	}
	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);

	list_add_tail(&lif->list, &ionic->lifs);

	return lif;

err_out_free_qcqs:
	ionic_qcqs_free(lif);
err_out_free_lif_info:
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;
err_out_free_netdev:
	free_netdev(lif->netdev);
	lif = NULL;
err_out_free_lid:
	kfree(lid);

	return ERR_PTR(err);
}
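/* Layout note (illustration, not driver code): alloc_etherdev_mqs()
 * reserves sizeof(struct ionic_lif) of private space behind the
 * net_device, so the lif and the netdev are one allocation:
 *
 *	netdev = alloc_etherdev_mqs(sizeof(*lif), ntxqs, nrxqs);
 *	lif = netdev_priv(netdev);	// no separate kzalloc() needed
 *	lif->netdev = netdev;		// back-pointer used by the ndo hooks
 *
 * This is also why teardown ends with a single free_netdev() in
 * ionic_lif_free() and there is no matching kfree(lif).
 */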
int ionic_lifs_alloc(struct ionic *ionic)
{
	struct ionic_lif *lif;

	INIT_LIST_HEAD(&ionic->lifs);

	/* only build the first lif, others are for later features */
	set_bit(0, ionic->lifbits);

	lif = ionic_lif_alloc(ionic, 0);
	if (IS_ERR_OR_NULL(lif)) {
		clear_bit(0, ionic->lifbits);
		return -ENOMEM;
	}

	ionic_lif_queue_identify(lif);

	return 0;
}

static void ionic_lif_reset(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_reset(idev, lif->index);
	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
}

static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;

	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");

	netif_device_detach(lif->netdev);

	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
		mutex_lock(&lif->queue_lock);
		ionic_stop_queues(lif);
		mutex_unlock(&lif->queue_lock);
	}

	if (netif_running(lif->netdev)) {
		ionic_txrx_deinit(lif);
		ionic_txrx_free(lif);
	}
	ionic_lifs_deinit(ionic);
	ionic_reset(ionic);
	ionic_qcqs_free(lif);

	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}

static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	int err;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Up: restarting LIFs\n");

	ionic_init_devinfo(ionic);
	ionic_port_init(ionic);
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out;

	err = ionic_lifs_init(ionic);
	if (err)
		goto err_qcqs_free;

	if (lif->registered)
		ionic_lif_set_netdev_info(lif);

	ionic_rx_filter_replay(lif);

	if (netif_running(lif->netdev)) {
		err = ionic_txrx_alloc(lif);
		if (err)
			goto err_lifs_deinit;

		err = ionic_txrx_init(lif);
		if (err)
			goto err_txrx_free;
	}

	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
	ionic_link_status_check_request(lif);
	netif_device_attach(lif->netdev);
	dev_info(ionic->dev, "FW Up: LIFs restarted\n");

	return;

err_txrx_free:
	ionic_txrx_free(lif);
err_lifs_deinit:
	ionic_lifs_deinit(ionic);
err_qcqs_free:
	ionic_qcqs_free(lif);
err_out:
	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}

static void ionic_lif_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	/* free rss indirection table */
	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
			  lif->rss_ind_tbl_pa);
	lif->rss_ind_tbl = NULL;
	lif->rss_ind_tbl_pa = 0;

	/* free queues */
	ionic_qcqs_free(lif);
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		ionic_lif_reset(lif);

	/* free lif info */
	kfree(lif->identity);
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;

	/* unmap doorbell page */
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	/* free netdev & lif */
	ionic_debugfs_del_lif(lif);
	list_del(&lif->list);
	free_netdev(lif->netdev);
}
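/* Descriptive note: the firmware recovery pair above is deliberately
 * symmetric. ionic_lif_handle_fw_down() tears down in the order
 *	stop queues -> txrx deinit/free -> lifs deinit -> reset -> qcqs free
 * and ionic_lif_handle_fw_up() rebuilds in the reverse order, ending with
 * a link check request so carrier state is re-evaluated against the
 * restarted firmware. The IONIC_LIF_F_FW_RESET bit brackets the whole
 * window so neither handler can run twice for the same event.
 */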
void ionic_lifs_free(struct ionic *ionic)
{
	struct list_head *cur, *tmp;
	struct ionic_lif *lif;

	list_for_each_safe(cur, tmp, &ionic->lifs) {
		lif = list_entry(cur, struct ionic_lif, list);

		ionic_lif_free(lif);
	}
}

static void ionic_lif_deinit(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
		return;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		cancel_work_sync(&lif->deferred.work);
		cancel_work_sync(&lif->tx_timeout_work);
		ionic_rx_filters_deinit(lif);
		if (lif->netdev->features & NETIF_F_RXHASH)
			ionic_lif_rss_deinit(lif);
	}

	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
	ionic_lif_qcq_deinit(lif, lif->adminqcq);

	mutex_destroy(&lif->queue_lock);
	ionic_lif_reset(lif);
}

void ionic_lifs_deinit(struct ionic *ionic)
{
	struct list_head *cur, *tmp;
	struct ionic_lif *lif;

	list_for_each_safe(cur, tmp, &ionic->lifs) {
		lif = list_entry(cur, struct ionic_lif, list);
		ionic_lif_deinit(lif);
	}
}

static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_q_init_comp comp;
	struct ionic_dev *idev;
	struct ionic_qcq *qcq;
	struct ionic_queue *q;
	int err;

	idev = &lif->ionic->idev;
	qcq = lif->adminqcq;
	q = &qcq->q;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err) {
		netdev_err(lif->netdev, "adminq init failed %d\n", err);
		return err;
	}

	q->hw_type = comp.hw_type;
	q->hw_index = le32_to_cpu(comp.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
		       NAPI_POLL_WEIGHT);

	napi_enable(&qcq->napi);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
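/* Descriptive note: the adminq is the one queue that cannot be created
 * through the adminq itself, so it is initialized via the slower,
 * register-based dev_cmd channel under dev_cmd_lock. Once it is running,
 * all later queue setup (the notifyq below, then the txqs and rxqs) is
 * done with ionic_adminq_post_wait() admin commands instead.
 */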
static int ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_qcq *qcq = lif->notifyqcq;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	int err;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_ENA),
			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
		}
	};

	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	lif->last_eid = 0;
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);

	/* preset the callback info */
	q->info[0].cb_arg = lif;

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static int ionic_station_set(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	struct sockaddr addr;
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;
	netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
		   ctx.comp.lif_getattr.mac);
	if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
		return 0;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		/* If the netdev mac is non-zero and doesn't match the default
		 * device address, it was set by something earlier and we're
		 * likely here again after a fw-upgrade reset. We need to be
		 * sure the netdev mac is in our filter list.
		 */
		if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
				      netdev->dev_addr))
			ionic_lif_addr(lif, netdev->dev_addr, true);
	} else {
		/* Update the netdev mac with the device's mac */
		memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
		addr.sa_family = AF_INET;
		err = eth_prepare_mac_addr_change(netdev, &addr);
		if (err) {
			netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
				    addr.sa_data, err);
			return 0;
		}

		eth_commit_mac_addr_change(netdev, &addr);
	}

	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
		   netdev->dev_addr);
	ionic_lif_addr(lif, netdev->dev_addr, true);

	return 0;
}
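/* Descriptive note: ionic_station_set() above resolves to one of three
 * cases:
 *	1) the device reports an all-zero MAC: leave things as they are
 *	2) the netdev MAC was set earlier (e.g. before a fw-upgrade reset)
 *	   and differs from the device default: keep the netdev MAC
 *	3) the netdev MAC is still unset: adopt the device's MAC
 * In cases 2 and 3 the function finishes by making sure the station
 * address is in the device filter list via ionic_lif_addr(..., true).
 */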
static int ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_lif_init_comp comp;
	int dbpage_num;
	int err;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err)
		return err;

	lif->hw_index = le16_to_cpu(comp.hw_index);
	mutex_init(&lif->queue_lock);

	/* now that we have the hw_index we can figure out our doorbell page */
	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
	if (!lif->dbid_count) {
		dev_err(dev, "No doorbell pages, aborting\n");
		return -EINVAL;
	}

	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
	if (!lif->dbid_inuse) {
		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
		return -ENOMEM;
	}

	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
	set_bit(0, lif->dbid_inuse);
	lif->kern_pid = 0;

	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
	if (!lif->kern_dbpage) {
		dev_err(dev, "Cannot map dbpage, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dbid;
	}

	err = ionic_lif_adminq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	if (lif->ionic->nnqs_per_lif) {
		err = ionic_lif_notifyq_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_init_nic_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		err = ionic_rx_filters_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_station_set(lif);
	if (err)
		goto err_out_notifyq_deinit;

	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;

	set_bit(IONIC_LIF_F_INITED, lif->state);

	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);

	return 0;

err_out_notifyq_deinit:
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
	ionic_lif_qcq_deinit(lif, lif->adminqcq);
	ionic_lif_reset(lif);
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
err_out_free_dbid:
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	return err;
}

int ionic_lifs_init(struct ionic *ionic)
{
	struct list_head *cur, *tmp;
	struct ionic_lif *lif;
	int err;

	list_for_each_safe(cur, tmp, &ionic->lifs) {
		lif = list_entry(cur, struct ionic_lif, list);
		err = ionic_lif_init(lif);
		if (err)
			return err;
	}

	return 0;
}

static void ionic_lif_notify_work(struct work_struct *ws)
{
}

static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
		sizeof(ctx.cmd.lif_setattr.name));

	ionic_adminq_post_wait(lif, &ctx);
}

static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
{
	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
		return NULL;

	return netdev_priv(netdev);
}

static int ionic_lif_notify(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(info);
	struct ionic *ionic = container_of(nb, struct ionic, nb);
	struct ionic_lif *lif = ionic_netdev_lif(ndev);

	if (!lif || lif->ionic != ionic)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGENAME:
		ionic_lif_set_netdev_info(lif);
		break;
	}

	return NOTIFY_DONE;
}

int ionic_lifs_register(struct ionic *ionic)
{
	int err;

	INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);

	ionic->nb.notifier_call = ionic_lif_notify;

	err = register_netdevice_notifier(&ionic->nb);
	if (err)
		ionic->nb.notifier_call = NULL;

	/* only register LIF0 for now */
	err = register_netdev(ionic->master_lif->netdev);
	if (err) {
		dev_err(ionic->dev, "Cannot register net device, aborting\n");
		return err;
	}
	ionic->master_lif->registered = true;
	ionic_lif_set_netdev_info(ionic->master_lif);

	return 0;
}
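/* Descriptive note: the netdevice notifier registered above is what keeps
 * the device's copy of the interface name current. A rename such as
 *
 *	ip link set eth0 name data0
 *
 * fires NETDEV_CHANGENAME; ionic_lif_notify() matches the netdev back to
 * this ionic instance and ionic_lif_set_netdev_info() pushes the new name
 * to the firmware via IONIC_LIF_ATTR_NAME.
 */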
void ionic_lifs_unregister(struct ionic *ionic)
{
	if (ionic->nb.notifier_call) {
		unregister_netdevice_notifier(&ionic->nb);
		cancel_work_sync(&ionic->nb_work);
		ionic->nb.notifier_call = NULL;
	}

	/* There is only one lif ever registered in the
	 * current model, so don't bother searching the
	 * ionic->lifs list for candidates to unregister
	 */
	if (ionic->master_lif &&
	    ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(ionic->master_lif->netdev);
}

static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	union ionic_q_identity *q_ident;
	struct ionic_dev *idev;
	int qtype;
	int err;

	idev = &lif->ionic->idev;
	q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;

	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* filter out the ones we know about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
					     ionic_qtype_versions[qtype]);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		if (!err) {
			qti->version = q_ident->version;
			qti->supported = q_ident->supported;
			qti->features = le64_to_cpu(q_ident->features);
			qti->desc_sz = le16_to_cpu(q_ident->desc_sz);
			qti->comp_sz = le16_to_cpu(q_ident->comp_sz);
			qti->sg_desc_sz = le16_to_cpu(q_ident->sg_desc_sz);
			qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
			qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
		}
		mutex_unlock(&ionic->dev_cmd_lock);

		if (err == -EINVAL) {
			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
			continue;
		} else if (err == -EIO) {
			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
			return;
		} else if (err) {
			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
				qtype, err);
			return;
		}

		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
			qtype, qti->version);
		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
			qtype, qti->supported);
		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
			qtype, qti->features);
		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
			qtype, qti->desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
			qtype, qti->comp_sz);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
			qtype, qti->sg_desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
			qtype, qti->max_sg_elems);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
			qtype, qti->sg_desc_stride);
	}
}
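/* Illustration (not driver code): the queue identify exchange is a simple
 * version negotiation. The driver offers the highest version it knows
 * from ionic_qtype_versions[] (e.g. 1 for IONIC_QTYPE_TXQ, the Tx SG v1
 * descriptor format) and the device replies with what it supports; the
 * result in qti->version is then used directly as the .ver field of later
 * IONIC_CMD_Q_INIT commands, as in ionic_lif_notifyq_init() above. Older
 * firmware without the identify op returns -EIO and the driver falls back
 * to the base (version 0) formats left by the memset().
 */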
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
		       union ionic_lif_identity *lid)
{
	struct ionic_dev *idev = &ionic->idev;
	size_t sz;
	int err;

	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err)
		return err;

	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
		le64_to_cpu(lid->capabilities));

	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
		le32_to_cpu(lid->eth.max_ucast_filters));
	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
		le32_to_cpu(lid->eth.max_mcast_filters));
	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
		le64_to_cpu(lid->eth.config.features));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
		le32_to_cpu(lid->eth.config.mtu));

	return 0;
}

int ionic_lifs_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 *    1 for master lif adminq/notifyq
	 *    1 for each CPU for master lif TxRx queue pairs
	 *    whatever's left is for RDMA queues
	 */
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2;  /* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	if (err == -ENOSPC)
		goto try_fewer;

	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

try_fewer:
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}
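/* Worked example with hypothetical numbers: a 16-CPU host whose device
 * reports dev_nintrs = 24, with nnqs_per_lif already 1 and neqs and nxqs
 * both clamped to 16:
 *
 *	nintrs = 1 + 16 + 16 = 33  > 24  -> try_fewer: neqs 16 -> 8
 *	nintrs = 1 + 16 +  8 = 25  > 24  -> try_fewer: neqs  8 -> 4
 *	nintrs = 1 + 16 +  4 = 21 <= 24  -> ask the OS for 21 vectors
 *
 * Halving nnqs_per_lif never reduces nintrs (the notifyq shares the
 * adminq interrupt), so try_fewer only starts freeing vectors once the
 * notifyq count reaches 1: first the RDMA event queues are halved, then
 * the TxRx queue pairs, until the request fits or only min_intrs remains.
 */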