// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

/* queuetype support level */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,	/* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,	/* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 0,	/* 0 = Base version with CQ+SG support */
	[IONIC_QTYPE_TXQ]     = 1,	/* 0 = Base version with CQ+SG support
					 * 1 =   ... with Tx SG version 1
					 */
};

static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
static void ionic_link_status_check(struct ionic_lif *lif);
static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);

static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);

static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif,
					     deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	spin_lock_bh(&def->lock);
	if (!list_empty(&def->list)) {
		w = list_first_entry(&def->list,
				     struct ionic_deferred_work, list);
		list_del(&w->list);
	}
	spin_unlock_bh(&def->lock);

	if (w) {
		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif, w->rx_mode);
			break;
		case IONIC_DW_TYPE_RX_ADDR_ADD:
			ionic_lif_addr_add(lif, w->addr);
			break;
		case IONIC_DW_TYPE_RX_ADDR_DEL:
			ionic_lif_addr_del(lif, w->addr);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			if (w->fw_status)
				ionic_lif_handle_fw_up(lif);
			else
				ionic_lif_handle_fw_down(lif);
			break;
		default:
			break;
		}
		kfree(w);
		schedule_work(&def->work);
	}
}

void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
				struct ionic_deferred_work *work)
{
	spin_lock_bh(&def->lock);
	list_add_tail(&work->list, &def->list);
	spin_unlock_bh(&def->lock);
	schedule_work(&def->work);
}

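/* Reconcile the netdev carrier and queue state with the link state most
 * recently reported by the firmware.  Only runs once a check has been
 * requested via IONIC_LIF_F_LINK_CHECK_REQUESTED, and is skipped
 * entirely on the management NIC.
 */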
static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (lif->ionic->is_mgmt_nic)
		return;

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			u32 link_speed;

			ionic_port_identify(lif->ionic);
			link_speed = le32_to_cpu(lif->info->status.link_speed);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    link_speed / 1000);
			netif_carrier_on(netdev);
		}

		if (netif_running(lif->netdev))
			ionic_start_queues(lif);
	} else {
		if (netif_carrier_ok(netdev)) {
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (netif_running(lif->netdev))
			ionic_stop_queues(lif);
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}

void ionic_link_status_check_request(struct ionic_lif *lif)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (in_interrupt()) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return;

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_link_status_check(lif);
	}
}

static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = lif->netdev->name;
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs) {
		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
			    __func__, index, ionic->nintrs);
		return -ENOSPC;
	}

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	return 0;
}

static void ionic_intr_free(struct ionic *ionic, int index)
{
	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
		clear_bit(index, ionic->intrs);
}

static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		napi_enable(&qcq->napi);
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}

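/* The inverse of ionic_qcq_enable(): mask the interrupt, let any
 * in-flight handler finish, and quiesce NAPI before asking the device
 * to stop servicing the queue.
 */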
static int ionic_qcq_disable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_DISABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	if (!qcq)
		return;

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
	qcq->base = NULL;
	qcq->base_pa = 0;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
		qcq->intr.vector = 0;
		ionic_intr_free(lif->ionic, qcq->intr.index);
	}

	devm_kfree(dev, qcq->cq.info);
	qcq->cq.info = NULL;
	devm_kfree(dev, qcq->q.info);
	qcq->q.info = NULL;
	devm_kfree(dev, qcq);
}

static void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int i;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		ionic_qcq_free(lif, lif->adminqcq);
		lif->adminqcq = NULL;
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			if (lif->rxqcqs[i].stats)
				devm_kfree(dev, lif->rxqcqs[i].stats);
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			if (lif->txqcqs[i].stats)
				devm_kfree(dev, lif->txqcqs[i].stats);
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}

static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
		ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
	}

	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
}

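/* Allocate one queue/completion-queue pair.  The descriptor ring, the
 * completion ring and (when IONIC_QCQ_F_SG is set) the SG ring share a
 * single coherent DMA allocation, with each ring base rounded up to a
 * page boundary:
 *
 *	q_base --ALIGN--> cq_base --ALIGN--> sg_base
 */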
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int pid, struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	u32 q_size, cq_size, sg_size, total_size;
	struct device *dev = lif->ionic->dev;
	void *q_base, *cq_base, *sg_base;
	dma_addr_t cq_base_pa = 0;
	dma_addr_t sg_base_pa = 0;
	dma_addr_t q_base_pa = 0;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	q_size = num_descs * desc_size;
	cq_size = num_descs * cq_desc_size;
	sg_size = num_descs * sg_desc_size;

	total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
	/* Note: aligning q_size/cq_size is not enough on its own, because
	 * cq_base must be page-aligned while q_base might not land on a
	 * page boundary, so add PAGE_SIZE of slack.
	 */
	total_size += PAGE_SIZE;
	if (flags & IONIC_QCQ_F_SG) {
		total_size += ALIGN(sg_size, PAGE_SIZE);
		total_size += PAGE_SIZE;
	}

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->flags = flags;

	new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
				   GFP_KERNEL);
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->q.type = type;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out;
	}

	if (flags & IONIC_QCQ_F_INTR) {
		err = ionic_intr_alloc(lif, &new->intr);
		if (err) {
			netdev_warn(lif->netdev, "no intr for %s: %d\n",
				    name, err);
			goto err_out;
		}

		err = ionic_bus_get_irq(lif->ionic, new->intr.index);
		if (err < 0) {
			netdev_warn(lif->netdev, "no vector for %s: %d\n",
				    name, err);
			goto err_out_free_intr;
		}
		new->intr.vector = err;
		ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
				       IONIC_INTR_MASK_SET);

		err = ionic_request_irq(lif, new);
		if (err) {
			netdev_warn(lif->netdev, "irq request failed %d\n", err);
			goto err_out_free_intr;
		}

		new->intr.cpu = cpumask_local_spread(new->intr.index,
						     dev_to_node(dev));
		if (new->intr.cpu != -1)
			cpumask_set_cpu(new->intr.cpu,
					&new->intr.affinity_mask);
	} else {
		new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
	}

	new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
				    GFP_KERNEL);
	if (!new->cq.info) {
		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_irq;
	}

	new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
				       GFP_KERNEL);
	if (!new->base) {
		netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	new->total_size = total_size;

	q_base = new->base;
	q_base_pa = new->base_pa;

	cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
	cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);

	if (flags & IONIC_QCQ_F_SG) {
		sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
					PAGE_SIZE);
		sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	ionic_q_map(&new->q, q_base, q_base_pa);
	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
	ionic_cq_bind(&new->cq, &new->q);

	*qcq = new;

	return 0;

err_out_free_irq:
	if (flags & IONIC_QCQ_F_INTR)
		devm_free_irq(dev, new->intr.vector, &new->napi);
err_out_free_intr:
	if (flags & IONIC_QCQ_F_INTR)
		ionic_intr_free(lif->ionic, new->intr.index);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

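/* Allocate the fixed queue set for the LIF: the adminq, an optional
 * notifyq that rides on the adminq's interrupt, and the arrays of
 * Tx/Rx qcq shells with their stats blocks.  The Tx/Rx queues
 * themselves are allocated later, in ionic_txrx_alloc().
 */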
static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int q_list_size;
	unsigned int flags;
	int err;
	int i;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0, lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0, lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out_free_adminqcq;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
	err = -ENOMEM;
	lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out_free_notifyqcq;
	for (i = 0; i < lif->nxqs; i++) {
		lif->txqcqs[i].stats = devm_kzalloc(dev,
						    sizeof(struct ionic_q_stats),
						    GFP_KERNEL);
		if (!lif->txqcqs[i].stats)
			goto err_out_free_tx_stats;
	}

	lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out_free_tx_stats;
	for (i = 0; i < lif->nxqs; i++) {
		lif->rxqcqs[i].stats = devm_kzalloc(dev,
						    sizeof(struct ionic_q_stats),
						    GFP_KERNEL);
		if (!lif->rxqcqs[i].stats)
			goto err_out_free_rx_stats;
	}

	return 0;

err_out_free_rx_stats:
	for (i = 0; i < lif->nxqs; i++)
		if (lif->rxqcqs[i].stats)
			devm_kfree(dev, lif->rxqcqs[i].stats);
	devm_kfree(dev, lif->rxqcqs);
	lif->rxqcqs = NULL;
err_out_free_tx_stats:
	for (i = 0; i < lif->nxqs; i++)
		if (lif->txqcqs[i].stats)
			devm_kfree(dev, lif->txqcqs[i].stats);
	devm_kfree(dev, lif->txqcqs);
	lif->txqcqs = NULL;
err_out_free_notifyqcq:
	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}
err_out_free_adminqcq:
	ionic_qcq_free(lif, lif->adminqcq);
	lif->adminqcq = NULL;

	return err;
}

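/* Register a Tx queue with the device via the Q_INIT admin command.
 * Note the Tx queue has no interrupt of its own: Tx completion
 * processing rides on the paired Rx queue's interrupt, which is why
 * intr_index below points at the rx qcq.
 */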
static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
		},
	};
	int err;

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);

	q->tail = q->info;
	q->head = q->tail;
	cq->tail = cq->info;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
		},
	};
	int err;

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);

	q->tail = q->info;
	q->head = q->tail;
	cq->tail = cq->info;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
		       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

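/* Service one notifyq completion.  Events carry a monotonically
 * increasing event id (eid); an event at or below the last eid we have
 * seen is stale and ends the service loop.
 */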
static bool ionic_notifyq_service(struct ionic_cq *cq,
				  struct ionic_cq_info *cq_info)
{
	union ionic_notifyq_comp *comp = cq_info->cq_desc;
	struct ionic_deferred_work *work;
	struct net_device *netdev;
	struct ionic_queue *q;
	struct ionic_lif *lif;
	u64 eid;

	q = cq->bound_q;
	lif = q->info[0].cb_arg;
	netdev = lif->netdev;
	eid = le64_to_cpu(comp->event.eid);

	/* Have we run out of new completions to process? */
	if (eid <= lif->last_eid)
		return false;

	lif->last_eid = eid;

	dev_dbg(lif->ionic->dev, "notifyq event:\n");
	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	switch (le16_to_cpu(comp->event.ecode)) {
	case IONIC_EVENT_LINK_CHANGE:
		ionic_link_status_check_request(lif);
		break;
	case IONIC_EVENT_RESET:
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
		} else {
			work->type = IONIC_DW_TYPE_LIF_RESET;
			ionic_lif_deferred_enqueue(&lif->deferred, work);
		}
		break;
	default:
		netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
			    comp->event.ecode, eid);
		break;
	}

	return true;
}

static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct ionic_cq *cq = &lif->notifyqcq->cq;
	u32 work_done;

	work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
				     NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);

	return work_done;
}

static bool ionic_adminq_service(struct ionic_cq *cq,
				 struct ionic_cq_info *cq_info)
{
	struct ionic_admin_comp *comp = cq_info->cq_desc;

	if (!color_match(comp->color, cq->done_color))
		return false;

	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));

	return true;
}

static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	int n_work = 0;
	int a_work = 0;

	if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
		n_work = ionic_notifyq_clean(lif, budget);
	a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);

	return max(n_work, a_work);
}

void ionic_get_stats64(struct net_device *netdev,
		       struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}

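/* Add a MAC filter on the device and mirror it in the driver's
 * rx_filter list.  -EEXIST from the device is tolerated, since the
 * filter may legitimately already be present.
 */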
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	/* don't bother if we already have it */
	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	spin_unlock_bh(&lif->rx_filters.lock);
	if (f)
		return 0;

	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
		   ctx.comp.rx_filter_add.filter_id);

	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
		   ctx.cmd.rx_filter_del.filter_id);

	return 0;
}

static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
{
	struct ionic *ionic = lif->ionic;
	struct ionic_deferred_work *work;
	unsigned int nmfilters;
	unsigned int nufilters;

	if (add) {
		/* Do we have space for this filter?  We test the counters
		 * here before checking the need for deferral so that we
		 * can return an overflow error to the stack.
		 */
		nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
		nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);

		if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
			lif->nmcast++;
		else if (!is_multicast_ether_addr(addr) &&
			 lif->nucast < nufilters)
			lif->nucast++;
		else
			return -ENOSPC;
	} else {
		if (is_multicast_ether_addr(addr) && lif->nmcast)
			lif->nmcast--;
		else if (!is_multicast_ether_addr(addr) && lif->nucast)
			lif->nucast--;
	}

	if (in_interrupt()) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
			return -ENOMEM;
		}
		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
				   IONIC_DW_TYPE_RX_ADDR_DEL;
		memcpy(work->addr, addr, ETH_ALEN);
		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		if (add)
			return ionic_lif_addr_add(lif, addr);
		else
			return ionic_lif_addr_del(lif, addr);
	}

	return 0;
}

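/* Thin wrappers with the signature expected by
 * __dev_uc_sync()/__dev_mc_sync() in ionic_set_rx_mode() below.
 */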
"add" : "del", addr); 956 if (add) 957 return ionic_lif_addr_add(lif, addr); 958 else 959 return ionic_lif_addr_del(lif, addr); 960 } 961 962 return 0; 963 } 964 965 static int ionic_addr_add(struct net_device *netdev, const u8 *addr) 966 { 967 return ionic_lif_addr(netdev_priv(netdev), addr, true); 968 } 969 970 static int ionic_addr_del(struct net_device *netdev, const u8 *addr) 971 { 972 return ionic_lif_addr(netdev_priv(netdev), addr, false); 973 } 974 975 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) 976 { 977 struct ionic_admin_ctx ctx = { 978 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 979 .cmd.rx_mode_set = { 980 .opcode = IONIC_CMD_RX_MODE_SET, 981 .lif_index = cpu_to_le16(lif->index), 982 .rx_mode = cpu_to_le16(rx_mode), 983 }, 984 }; 985 char buf[128]; 986 int err; 987 int i; 988 #define REMAIN(__x) (sizeof(buf) - (__x)) 989 990 i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", 991 lif->rx_mode, rx_mode); 992 if (rx_mode & IONIC_RX_MODE_F_UNICAST) 993 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); 994 if (rx_mode & IONIC_RX_MODE_F_MULTICAST) 995 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); 996 if (rx_mode & IONIC_RX_MODE_F_BROADCAST) 997 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); 998 if (rx_mode & IONIC_RX_MODE_F_PROMISC) 999 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); 1000 if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) 1001 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); 1002 netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf); 1003 1004 err = ionic_adminq_post_wait(lif, &ctx); 1005 if (err) 1006 netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n", 1007 rx_mode, err); 1008 else 1009 lif->rx_mode = rx_mode; 1010 } 1011 1012 static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) 1013 { 1014 struct ionic_deferred_work *work; 1015 1016 if (in_interrupt()) { 1017 work = kzalloc(sizeof(*work), GFP_ATOMIC); 1018 if (!work) { 1019 netdev_err(lif->netdev, "%s OOM\n", __func__); 1020 return; 1021 } 1022 work->type = IONIC_DW_TYPE_RX_MODE; 1023 work->rx_mode = rx_mode; 1024 netdev_dbg(lif->netdev, "deferred: rx_mode\n"); 1025 ionic_lif_deferred_enqueue(&lif->deferred, work); 1026 } else { 1027 ionic_lif_rx_mode(lif, rx_mode); 1028 } 1029 } 1030 1031 static void ionic_set_rx_mode(struct net_device *netdev) 1032 { 1033 struct ionic_lif *lif = netdev_priv(netdev); 1034 struct ionic_identity *ident; 1035 unsigned int nfilters; 1036 unsigned int rx_mode; 1037 1038 ident = &lif->ionic->ident; 1039 1040 rx_mode = IONIC_RX_MODE_F_UNICAST; 1041 rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0; 1042 rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0; 1043 rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; 1044 rx_mode |= (netdev->flags & IFF_ALLMULTI) ? 
static void ionic_set_rx_mode(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_identity *ident;
	unsigned int nfilters;
	unsigned int rx_mode;

	ident = &lif->ionic->ident;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync unicast addresses
	 * next check to see if we're in an overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
	if (netdev_uc_count(netdev) + 1 > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		lif->uc_overflow = true;
	} else if (lif->uc_overflow) {
		lif->uc_overflow = false;
		if (!(netdev->flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
	}

	/* same for multicast */
	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
	nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
	if (netdev_mc_count(netdev) > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
		lif->mc_overflow = true;
	} else if (lif->mc_overflow) {
		lif->mc_overflow = false;
		if (!(netdev->flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	if (lif->rx_mode != rx_mode)
		_ionic_lif_rx_mode(lif, rx_mode);
}

static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}

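/* Push the requested feature set to the device.  The device replies
 * with what it can actually do, and that subset is what lands in
 * lif->hw_features; callers should check hw_features rather than
 * assume the request was honored in full.
 */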
static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & features) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC does not support vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");

	return 0;
}

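/* Negotiate the default feature set with the device, then advertise
 * whatever subset was granted through netdev->hw_features and
 * hw_enc_features.
 */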
static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* no netdev features on the management device */
	if (lif->ionic->is_mgmt_nic)
		return 0;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_RXHASH |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_addr_del(netdev, netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_addr_add(netdev, mac);
}

static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	netdev->mtu = new_mtu;
	err = ionic_reset_queues(lif);

	return err;
}

static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif,
					     tx_timeout_work);

	netdev_info(lif->netdev, "Tx Timeout recovery\n");

	rtnl_lock();
	ionic_reset_queues(lif);
	rtnl_unlock();
}

static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	schedule_work(&lif->tx_timeout_work);
}

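/* VLAN filters use the same rx_filter machinery as MAC filters, just
 * matching on IONIC_RX_FILTER_MATCH_VLAN instead.
 */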
static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
			.vlan.vlan = cpu_to_le16(vid),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
		   ctx.comp.rx_filter_add.filter_id);

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_vlan(lif, vid);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
		   le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	return ionic_adminq_post_wait(lif, &ctx);
}

int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_lif_rss_init(struct ionic_lif *lif)
{
	unsigned int tbl_sz;
	unsigned int i;

	lif->rss_types = IONIC_RSS_TYPE_IPV4 |
			 IONIC_RSS_TYPE_IPV4_TCP |
			 IONIC_RSS_TYPE_IPV4_UDP |
			 IONIC_RSS_TYPE_IPV6 |
			 IONIC_RSS_TYPE_IPV6_TCP |
			 IONIC_RSS_TYPE_IPV6_UDP;

	/* Fill indirection table with 'default' values */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
}

static void ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	int tbl_sz;

	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	memset(lif->rss_ind_tbl, 0, tbl_sz);
	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}

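/* Queue teardown is split across the next three helpers: disable (stop
 * the device side), deinit (flush and drain), and free (release the
 * qcq memory).  An -ETIMEDOUT from a disable presumably means the FW
 * has stopped responding, so we don't bother poking the rest.
 */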
static void ionic_txrx_disable(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++) {
			err = ionic_qcq_disable(lif->txqcqs[i].qcq);
			if (err == -ETIMEDOUT)
				break;
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++) {
			err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
			if (err == -ETIMEDOUT)
				break;
		}
	}
}

static void ionic_txrx_deinit(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
			ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
			ionic_tx_empty(&lif->txqcqs[i].qcq->q);
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++) {
			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
			ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
			ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
		}
	}
	lif->rx_mode = 0;
}

static void ionic_txrx_free(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++) {
			ionic_qcq_free(lif, lif->txqcqs[i].qcq);
			lif->txqcqs[i].qcq = NULL;
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++) {
			ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
			lif->rxqcqs[i].qcq = NULL;
		}
	}
}

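/* Allocate the per-channel Tx and Rx qcqs.  The Tx SG descriptor
 * format depends on the queue type version negotiated with the device:
 * version 1 Tx queues use the larger ionic_txq_sg_desc_v1 layout.
 */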
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int sg_desc_sz;
	unsigned int flags;
	unsigned int i;
	int err = 0;

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      lif->ntxq_descs,
				      sizeof(struct ionic_txq_desc),
				      sizeof(struct ionic_txq_comp),
				      sg_desc_sz,
				      lif->kern_pid, &lif->txqcqs[i].qcq);
		if (err)
			goto err_out;

		lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
		ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq);
	}

	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
				      lif->nrxq_descs,
				      sizeof(struct ionic_rxq_desc),
				      sizeof(struct ionic_rxq_comp),
				      sizeof(struct ionic_rxq_sg_desc),
				      lif->kern_pid, &lif->rxqcqs[i].qcq);
		if (err)
			goto err_out;

		lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[i].qcq->intr.index,
				     lif->rx_coalesce_hw);
		ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
					  lif->txqcqs[i].qcq);
		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq);
	}

	return 0;

err_out:
	ionic_txrx_free(lif);

	return err;
}

static int ionic_txrx_init(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
		if (err)
			goto err_out;

		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
		if (err) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
			goto err_out;
		}
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_init(lif);

	ionic_set_rx_mode(lif->netdev);

	return 0;

err_out:
	while (i--) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
	}

	return err;
}

static int ionic_txrx_enable(struct ionic_lif *lif)
{
	int i, err;

	for (i = 0; i < lif->nxqs; i++) {
		ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
		err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
		if (err)
			goto err_out;

		err = ionic_qcq_enable(lif->txqcqs[i].qcq);
		if (err) {
			if (err != -ETIMEDOUT)
				ionic_qcq_disable(lif->rxqcqs[i].qcq);
			goto err_out;
		}
	}

	return 0;

err_out:
	while (i--) {
		err = ionic_qcq_disable(lif->txqcqs[i].qcq);
		if (err == -ETIMEDOUT)
			break;
		err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
		if (err == -ETIMEDOUT)
			break;
	}

	return err;
}

static int ionic_start_queues(struct ionic_lif *lif)
{
	int err;

	if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
		return 0;

	err = ionic_txrx_enable(lif);
	if (err) {
		clear_bit(IONIC_LIF_F_UP, lif->state);
		return err;
	}
	netif_tx_wake_all_queues(lif->netdev);

	return 0;
}

int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_txrx_alloc(lif);
	if (err)
		return err;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_out;

	/* don't start the queues until we have link */
	if (netif_carrier_ok(netdev)) {
		err = ionic_start_queues(lif);
		if (err)
			goto err_txrx_deinit;
	}

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_out:
	ionic_txrx_free(lif);
	return err;
}

static void ionic_stop_queues(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
		return;

	ionic_txrx_disable(lif);
	netif_tx_disable(lif->netdev);
}

int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	if (!netif_device_present(netdev))
		return 0;

	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);

	return 0;
}

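/* The ndo VF handlers below share a common pattern: bail out if the
 * device has been detached, take ionic->vf_op_lock to keep the vfs[]
 * table stable, and range-check the vf index against pci_num_vf()
 * before touching the entry.
 */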
static int ionic_get_vf_config(struct net_device *netdev,
			       int vf, struct ifla_vf_info *ivf)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ivf->vf = vf;
		ivf->vlan = ionic->vfs[vf].vlanid;
		ivf->qos = 0;
		ivf->spoofchk = ionic->vfs[vf].spoofchk;
		ivf->linkstate = ionic->vfs[vf].linkstate;
		ivf->max_tx_rate = ionic->vfs[vf].maxrate;
		ivf->trusted = ionic->vfs[vf].trusted;
		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}

static int ionic_get_vf_stats(struct net_device *netdev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	struct ionic_lif_stats *vs;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		memset(vf_stats, 0, sizeof(*vf_stats));
		vs = &ionic->vfs[vf].stats;

		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
		vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes);
		vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes);
		vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets);
		vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets);
		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
				       le64_to_cpu(vs->rx_mcast_drop_packets) +
				       le64_to_cpu(vs->rx_bcast_drop_packets);
		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
				       le64_to_cpu(vs->tx_mcast_drop_packets) +
				       le64_to_cpu(vs->tx_bcast_drop_packets);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
		if (!ret)
			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
			     u8 qos, __be16 proto)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* until someday when we support qos */
	if (qos)
		return -EINVAL;

	if (vlan > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
		if (!ret)
			ionic->vfs[vf].vlanid = vlan;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_rate(struct net_device *netdev, int vf,
			     int tx_min, int tx_max)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* setting the min just seems silly */
	if (tx_min)
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
		if (!ret)
			lif->ionic->vfs[vf].maxrate = tx_max;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data = set;	/* convert to u8 for config */
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_SPOOFCHK, &data);
		if (!ret)
			ionic->vfs[vf].spoofchk = data;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data = set;	/* convert to u8 for config */
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_TRUST, &data);
		if (!ret)
			ionic->vfs[vf].trusted = data;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data;
	int ret;

	switch (set) {
	case IFLA_VF_LINK_STATE_ENABLE:
		data = IONIC_VF_LINK_STATUS_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		data = IONIC_VF_LINK_STATUS_DOWN;
		break;
	case IFLA_VF_LINK_STATE_AUTO:
		data = IONIC_VF_LINK_STATUS_AUTO;
		break;
	default:
		return -EINVAL;
	}

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_LINKSTATE, &data);
		if (!ret)
			ionic->vfs[vf].linkstate = set;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static const struct net_device_ops ionic_netdev_ops = {
	.ndo_open		= ionic_open,
	.ndo_stop		= ionic_stop,
	.ndo_start_xmit		= ionic_start_xmit,
	.ndo_get_stats64	= ionic_get_stats64,
	.ndo_set_rx_mode	= ionic_set_rx_mode,
	.ndo_set_features	= ionic_set_features,
	.ndo_set_mac_address	= ionic_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ionic_tx_timeout,
	.ndo_change_mtu		= ionic_change_mtu,
	.ndo_vlan_rx_add_vid	= ionic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ionic_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= ionic_set_vf_vlan,
	.ndo_set_vf_trust	= ionic_set_vf_trust,
	.ndo_set_vf_mac		= ionic_set_vf_mac,
	.ndo_set_vf_rate	= ionic_set_vf_rate,
	.ndo_set_vf_spoofchk	= ionic_set_vf_spoofchk,
	.ndo_get_vf_config	= ionic_get_vf_config,
	.ndo_set_vf_link_state	= ionic_set_vf_link_state,
	.ndo_get_vf_stats	= ionic_get_vf_stats,
};

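/* Stop and restart the queues to absorb a config change such as a new
 * MTU.  IONIC_LIF_F_QUEUE_RESET serializes concurrent resetters, and
 * the watchdog is bumped first so that the stop/start cycle itself
 * doesn't trigger a spurious Tx timeout.
 */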
static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
{
	struct device *dev = ionic->dev;
	struct net_device *netdev;
	struct ionic_lif *lif;
	int tbl_sz;
	int err;

	netdev = alloc_etherdev_mqs(sizeof(*lif),
				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
	if (!netdev) {
		dev_err(dev, "Cannot allocate netdev, aborting\n");
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(netdev, dev);

	lif = netdev_priv(netdev);
	lif->netdev = netdev;
	ionic->master_lif = lif;
	netdev->netdev_ops = &ionic_netdev_ops;
	ionic_ethtool_set_ops(netdev);

	netdev->watchdog_timeo = 2 * HZ;
	netif_carrier_off(netdev);

	netdev->min_mtu = IONIC_MIN_MTU;
	netdev->max_mtu = IONIC_MAX_MTU;

	lif->neqs = ionic->neqs_per_lif;
	lif->nxqs = ionic->ntxqs_per_lif;

	lif->ionic = ionic;
	lif->index = index;
	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;

	/* Convert the default coalesce value to actual hw resolution */
	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
						    lif->rx_coalesce_usecs);

	snprintf(lif->name, sizeof(lif->name), "lif%u", index);

	spin_lock_init(&lif->adminq_lock);

	spin_lock_init(&lif->deferred.lock);
	INIT_LIST_HEAD(&lif->deferred.list);
	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);

	/* allocate lif info */
	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
	lif->info = dma_alloc_coherent(dev, lif->info_sz,
				       &lif->info_pa, GFP_KERNEL);
	if (!lif->info) {
		dev_err(dev, "Failed to allocate lif info, aborting\n");
		err = -ENOMEM;
		goto err_out_free_netdev;
	}

	ionic_debugfs_add_lif(lif);

	/* allocate queues */
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out_free_lif_info;

	/* allocate rss indirection table */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
					      &lif->rss_ind_tbl_pa,
					      GFP_KERNEL);

	if (!lif->rss_ind_tbl) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
		goto err_out_free_qcqs;
	}
	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);

	list_add_tail(&lif->list, &ionic->lifs);

	return lif;

err_out_free_qcqs:
	ionic_qcqs_free(lif);
err_out_free_lif_info:
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;
err_out_free_netdev:
	free_netdev(lif->netdev);
	lif = NULL;

	return ERR_PTR(err);
}

int ionic_lifs_alloc(struct ionic *ionic)
{
	struct ionic_lif *lif;

	INIT_LIST_HEAD(&ionic->lifs);

	/* only build the first lif, others are for later features */
	set_bit(0, ionic->lifbits);

	lif = ionic_lif_alloc(ionic, 0);
	if (IS_ERR_OR_NULL(lif)) {
		clear_bit(0, ionic->lifbits);
		return -ENOMEM;
	}

	lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
	ionic_lif_queue_identify(lif);

	return 0;
}

static void ionic_lif_reset(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_reset(idev, lif->index);
	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
}
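/* Firmware down/up handling: the two handlers below run from the
 * deferred-work path (IONIC_DW_TYPE_LIF_RESET) when the driver
 * notices a firmware state change.  Teardown stops traffic first and
 * then releases queue and lif resources; recovery rebuilds the same
 * state in the opposite order, so the two functions should be kept
 * mirror images of each other.
 */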
static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;

	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");

	netif_device_detach(lif->netdev);

	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
		ionic_stop_queues(lif);
	}

	if (netif_running(lif->netdev)) {
		ionic_txrx_deinit(lif);
		ionic_txrx_free(lif);
	}
	ionic_lifs_deinit(ionic);
	ionic_reset(ionic);
	ionic_qcqs_free(lif);

	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}

static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	int err;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Up: restarting LIFs\n");

	ionic_init_devinfo(ionic);
	ionic_port_init(ionic);
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out;

	err = ionic_lifs_init(ionic);
	if (err)
		goto err_qcqs_free;

	if (lif->registered)
		ionic_lif_set_netdev_info(lif);

	ionic_rx_filter_replay(lif);

	if (netif_running(lif->netdev)) {
		err = ionic_txrx_alloc(lif);
		if (err)
			goto err_lifs_deinit;

		err = ionic_txrx_init(lif);
		if (err)
			goto err_txrx_free;
	}

	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
	ionic_link_status_check_request(lif);
	netif_device_attach(lif->netdev);
	dev_info(ionic->dev, "FW Up: LIFs restarted\n");

	return;

err_txrx_free:
	ionic_txrx_free(lif);
err_lifs_deinit:
	ionic_lifs_deinit(ionic);
err_qcqs_free:
	ionic_qcqs_free(lif);
err_out:
	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}

static void ionic_lif_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	/* free rss indirection table */
	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
			  lif->rss_ind_tbl_pa);
	lif->rss_ind_tbl = NULL;
	lif->rss_ind_tbl_pa = 0;

	/* free queues */
	ionic_qcqs_free(lif);
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		ionic_lif_reset(lif);

	/* free lif info */
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;

	/* unmap doorbell page */
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	/* free netdev & lif */
	ionic_debugfs_del_lif(lif);
	list_del(&lif->list);
	free_netdev(lif->netdev);
}

void ionic_lifs_free(struct ionic *ionic)
{
	struct list_head *cur, *tmp;
	struct ionic_lif *lif;

	list_for_each_safe(cur, tmp, &ionic->lifs) {
		lif = list_entry(cur, struct ionic_lif, list);

		ionic_lif_free(lif);
	}
}

static void ionic_lif_deinit(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
		return;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		cancel_work_sync(&lif->deferred.work);
		cancel_work_sync(&lif->tx_timeout_work);
		ionic_rx_filters_deinit(lif);
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_deinit(lif);

	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
	ionic_lif_qcq_deinit(lif, lif->adminqcq);

	ionic_lif_reset(lif);
}

void ionic_lifs_deinit(struct ionic *ionic)
{
	struct list_head *cur, *tmp;
	struct ionic_lif *lif;

	list_for_each_safe(cur, tmp, &ionic->lifs) {
		lif = list_entry(cur, struct ionic_lif, list);
		ionic_lif_deinit(lif);
	}
}
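/* The adminq is special: it has to be brought up through the dev_cmd
 * register interface because there is no adminq yet to post a Q_INIT
 * command to.  Once it is running, every other queue (the notifyq
 * below included) is initialized with IONIC_CMD_Q_INIT posted through
 * the adminq itself.
 */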
static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_q_init_comp comp;
	struct ionic_dev *idev;
	struct ionic_qcq *qcq;
	struct ionic_queue *q;
	int err;

	idev = &lif->ionic->idev;
	qcq = lif->adminqcq;
	q = &qcq->q;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err) {
		netdev_err(lif->netdev, "adminq init failed %d\n", err);
		return err;
	}

	q->hw_type = comp.hw_type;
	q->hw_index = le32_to_cpu(comp.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
		       NAPI_POLL_WEIGHT);

	napi_enable(&qcq->napi);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static int ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_qcq *qcq = lif->notifyqcq;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	int err;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_ENA),
			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
		}
	};

	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	lif->last_eid = 0;
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);

	/* preset the callback info */
	q->info[0].cb_arg = lif;

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
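/* Reconcile the device's station MAC with the netdev: if the netdev
 * already has a non-zero address (e.g. set by userspace, or left over
 * from before a fw-upgrade reset) it wins and is added to the filter
 * list; otherwise the device-provided address is adopted.
 */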
static int ionic_station_set(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	struct sockaddr addr;
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;
	netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
		   ctx.comp.lif_getattr.mac);
	if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
		return 0;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		/* If the netdev mac is non-zero and doesn't match the default
		 * device address, it was set by something earlier and we're
		 * likely here again after a fw-upgrade reset.  We need to be
		 * sure the netdev mac is in our filter list.
		 */
		if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
				      netdev->dev_addr))
			ionic_lif_addr(lif, netdev->dev_addr, true);
	} else {
		/* Update the netdev mac with the device's mac */
		memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
		addr.sa_family = AF_INET;
		err = eth_prepare_mac_addr_change(netdev, &addr);
		if (err) {
			netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
				    addr.sa_data, err);
			return 0;
		}

		eth_commit_mac_addr_change(netdev, &addr);
	}

	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
		   netdev->dev_addr);
	ionic_lif_addr(lif, netdev->dev_addr, true);

	return 0;
}
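/* Bring-up order for a lif: LIF_INIT dev_cmd, map the kernel doorbell
 * page (doorbell id 0 is reserved for the kernel, so kern_pid == 0),
 * bootstrap the adminq, then notifyq, NIC features, rx filters and
 * the station MAC.  The error labels below unwind in reverse order;
 * the qcq deinit helper tolerates a queue that never finished init.
 */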
static int ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_lif_init_comp comp;
	int dbpage_num;
	int err;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err)
		return err;

	lif->hw_index = le16_to_cpu(comp.hw_index);

	/* now that we have the hw_index we can figure out our doorbell page */
	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
	if (!lif->dbid_count) {
		dev_err(dev, "No doorbell pages, aborting\n");
		return -EINVAL;
	}

	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
	if (!lif->dbid_inuse) {
		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
		return -ENOMEM;
	}

	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
	set_bit(0, lif->dbid_inuse);
	lif->kern_pid = 0;

	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
	if (!lif->kern_dbpage) {
		dev_err(dev, "Cannot map dbpage, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dbid;
	}

	err = ionic_lif_adminq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	if (lif->ionic->nnqs_per_lif) {
		err = ionic_lif_notifyq_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_init_nic_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		err = ionic_rx_filters_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_station_set(lif);
	if (err)
		goto err_out_notifyq_deinit;

	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;

	set_bit(IONIC_LIF_F_INITED, lif->state);

	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);

	return 0;

err_out_notifyq_deinit:
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
	ionic_lif_qcq_deinit(lif, lif->adminqcq);
	ionic_lif_reset(lif);
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
err_out_free_dbid:
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	return err;
}

int ionic_lifs_init(struct ionic *ionic)
{
	struct list_head *cur, *tmp;
	struct ionic_lif *lif;
	int err;

	list_for_each_safe(cur, tmp, &ionic->lifs) {
		lif = list_entry(cur, struct ionic_lif, list);
		err = ionic_lif_init(lif);
		if (err)
			return err;
	}

	return 0;
}

static void ionic_lif_notify_work(struct work_struct *ws)
{
}

static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
		sizeof(ctx.cmd.lif_setattr.name));

	ionic_adminq_post_wait(lif, &ctx);
}

static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
{
	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
		return NULL;

	return netdev_priv(netdev);
}

static int ionic_lif_notify(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(info);
	struct ionic *ionic = container_of(nb, struct ionic, nb);
	struct ionic_lif *lif = ionic_netdev_lif(ndev);

	if (!lif || lif->ionic != ionic)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGENAME:
		ionic_lif_set_netdev_info(lif);
		break;
	}

	return NOTIFY_DONE;
}

int ionic_lifs_register(struct ionic *ionic)
{
	int err;

	/* the netdev is not registered on the management device, it is
	 * only used as a vehicle for napi operations on the adminq
	 */
	if (ionic->is_mgmt_nic)
		return 0;

	INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);

	ionic->nb.notifier_call = ionic_lif_notify;

	err = register_netdevice_notifier(&ionic->nb);
	if (err)
		ionic->nb.notifier_call = NULL;

	/* only register LIF0 for now */
	err = register_netdev(ionic->master_lif->netdev);
	if (err) {
		dev_err(ionic->dev, "Cannot register net device, aborting\n");
		/* don't leave the notifier registered on failure */
		if (ionic->nb.notifier_call) {
			unregister_netdevice_notifier(&ionic->nb);
			ionic->nb.notifier_call = NULL;
		}
		return err;
	}

	ionic->master_lif->registered = true;

	return 0;
}

void ionic_lifs_unregister(struct ionic *ionic)
{
	if (ionic->nb.notifier_call) {
		unregister_netdevice_notifier(&ionic->nb);
		cancel_work_sync(&ionic->nb_work);
		ionic->nb.notifier_call = NULL;
	}

	/* There is only one lif ever registered in the
	 * current model, so don't bother searching the
	 * ionic->lifs list for candidates to unregister
	 */
	if (ionic->master_lif &&
	    ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(ionic->master_lif->netdev);
}
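/* Queue-type version negotiation: for each well-known qtype the
 * driver asks the device, via Q_IDENTIFY dev_cmds, about the version
 * listed in ionic_qtype_versions and snapshots the returned sizes and
 * feature bits into lif->qtype_info[].  The results are read back out
 * of the dev_cmd data window, which is why q_ident points directly at
 * the device register space.
 */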
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	union ionic_q_identity *q_ident;
	struct ionic_dev *idev;
	int qtype;
	int err;

	idev = &lif->ionic->idev;
	q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;

	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* filter out the ones we know about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
					     ionic_qtype_versions[qtype]);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		if (!err) {
			qti->version = q_ident->version;
			qti->supported = q_ident->supported;
			qti->features = le64_to_cpu(q_ident->features);
			qti->desc_sz = le16_to_cpu(q_ident->desc_sz);
			qti->comp_sz = le16_to_cpu(q_ident->comp_sz);
			qti->sg_desc_sz = le16_to_cpu(q_ident->sg_desc_sz);
			qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
			qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
		}
		mutex_unlock(&ionic->dev_cmd_lock);

		if (err == -EINVAL) {
			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
			continue;
		} else if (err == -EIO) {
			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
			return;
		} else if (err) {
			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
				qtype, err);
			return;
		}

		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
			qtype, qti->version);
		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
			qtype, qti->supported);
		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
			qtype, qti->features);
		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
			qtype, qti->desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
			qtype, qti->comp_sz);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
			qtype, qti->sg_desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
			qtype, qti->max_sg_elems);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
			qtype, qti->sg_desc_stride);
	}
}

int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
		       union ionic_lif_identity *lid)
{
	struct ionic_dev *idev = &ionic->idev;
	size_t sz;
	int err;

	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err)
		return err;

	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
		le64_to_cpu(lid->capabilities));

	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
		le32_to_cpu(lid->eth.max_ucast_filters));
	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
		le32_to_cpu(lid->eth.max_mcast_filters));
	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
		le64_to_cpu(lid->eth.config.features));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
		le32_to_cpu(lid->eth.config.mtu));

	return 0;
}
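/* Interrupt budgeting: start by asking for one vector for the
 * adminq/notifyq pair, one per TxRx queue pair (capped at the online
 * CPU count), and one per RDMA event queue.  If the device or the OS
 * can't supply that many, halve the notifyq count, then the EQ count,
 * then the TxRx count, and retry.  As a purely hypothetical example:
 * with 16 CPUs but only 10 device interrupts, the first pass wants
 * 1 + 16 + 16 = 33 vectors, and the retry loop shrinks neqs and then
 * nxqs until 1 + nxqs + neqs fits within dev_nintrs.
 */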
int ionic_lifs_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 *    1 for master lif adminq/notifyq
	 *    1 for each CPU for master lif TxRx queue pairs
	 *    whatever's left is for RDMA queues
	 */
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2;	/* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	if (err == -ENOSPC)
		goto try_fewer;

	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

try_fewer:
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}