// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ethtool.h>
#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

/* queuetype support level */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,	/* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,	/* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 0,	/* 0 = Base version with CQ+SG support */
	[IONIC_QTYPE_TXQ]     = 1,	/* 0 = Base version with CQ+SG support
					 * 1 =   ... with Tx SG version 1
					 */
};

static void ionic_link_status_check(struct ionic_lif *lif);
static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);

static void ionic_txrx_deinit(struct ionic_lif *lif);
static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);

static void ionic_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder cur_moder;
	struct ionic_qcq *qcq;
	u32 new_coal;

	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	qcq = container_of(dim, struct ionic_qcq, dim);
	new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
	new_coal = new_coal ? new_coal : 1;

	if (qcq->intr.dim_coal_hw != new_coal) {
		unsigned int qi = qcq->cq.bound_q->index;
		struct ionic_lif *lif = qcq->q.lif;

		qcq->intr.dim_coal_hw = new_coal;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[qi]->intr.index,
				     qcq->intr.dim_coal_hw);
	}

	dim->state = DIM_START_MEASURE;
}
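
/* Note: the dim worker above hangs off the net_dim library's dynamic
 * interrupt moderation machinery; the NAPI pollers feed it traffic
 * samples and it calls back here when the moderation profile should
 * change.  The hw coalesce register is only rewritten when the value
 * actually changes, and a computed value of 0 is bumped to 1 rather
 * than written as-is.
 */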
static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	do {
		spin_lock_bh(&def->lock);
		if (!list_empty(&def->list)) {
			w = list_first_entry(&def->list,
					     struct ionic_deferred_work, list);
			list_del(&w->list);
		}
		spin_unlock_bh(&def->lock);

		if (!w)
			break;

		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			if (w->fw_status)
				ionic_lif_handle_fw_up(lif);
			else
				ionic_lif_handle_fw_down(lif);
			break;
		default:
			break;
		}
		kfree(w);
		w = NULL;
	} while (true);
}

void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
				struct ionic_deferred_work *work)
{
	spin_lock_bh(&def->lock);
	list_add_tail(&work->list, &def->list);
	spin_unlock_bh(&def->lock);
	schedule_work(&def->work);
}

static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	/* Don't put carrier back up if we're in a broken state */
	if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
		return;
	}

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		int err = 0;

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			err = ionic_start_queues(lif);
			if (err && err != -EBUSY) {
				netdev_err(lif->netdev,
					   "Failed to start queues: %d\n", err);
				set_bit(IONIC_LIF_F_BROKEN, lif->state);
				netif_carrier_off(lif->netdev);
			}
			mutex_unlock(&lif->queue_lock);
		}

		if (!err && !netif_carrier_ok(netdev)) {
			ionic_port_identify(lif->ionic);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    le32_to_cpu(lif->info->status.link_speed) / 1000);
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}

void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
			return;
		}

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_link_status_check(lif);
	}
}
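
/* Callers in atomic context (such as the notifyq event handler) pass
 * can_sleep=false and the link check is bounced to the deferred work
 * list above; sleepable callers run the check inline.  The
 * LINK_CHECK_REQUESTED bit collapses repeated requests into one
 * outstanding check.
 */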
static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = lif->netdev->name;
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs) {
		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
			    __func__, index, ionic->nintrs);
		return -ENOSPC;
	}

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	return 0;
}

static void ionic_intr_free(struct ionic *ionic, int index)
{
	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
		clear_bit(index, ionic->intrs);
}

static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		napi_enable(&qcq->napi);
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
{
	struct ionic_queue *q;
	struct ionic_lif *lif;
	int err = 0;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.oper = IONIC_Q_DISABLE,
		},
	};

	if (!qcq)
		return -ENXIO;

	q = &qcq->q;
	lif = q->lif;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		struct ionic_dev *idev = &lif->ionic->idev;

		cancel_work_sync(&qcq->dim.work);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
	}

	if (send_to_hw) {
		ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
		ctx.cmd.q_control.type = q->type;
		ctx.cmd.q_control.index = cpu_to_le32(q->index);
		dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
			ctx.cmd.q_control.index, ctx.cmd.q_control.type);

		err = ionic_adminq_post_wait(lif, &ctx);
	}

	return err;
}
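
/* Note the shutdown ordering in ionic_qcq_disable() above: the interrupt
 * is masked and synchronize_irq() has finished before napi_disable()
 * runs, so no new NAPI scheduling can sneak in, and only then is the
 * (optional) Q_CONTROL/DISABLE command sent down to the device.
 */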
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	if (!qcq)
		return;

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
		return;

	irq_set_affinity_hint(qcq->intr.vector, NULL);
	devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
	qcq->intr.vector = 0;
	ionic_intr_free(lif->ionic, qcq->intr.index);
	qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}

static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (qcq->q_base) {
		dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
		qcq->q_base = NULL;
		qcq->q_base_pa = 0;
	}

	if (qcq->cq_base) {
		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
		qcq->cq_base = NULL;
		qcq->cq_base_pa = 0;
	}

	if (qcq->sg_base) {
		dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
		qcq->sg_base = NULL;
		qcq->sg_base_pa = 0;
	}

	ionic_qcq_intr_free(lif, qcq);

	if (qcq->cq.info) {
		devm_kfree(dev, qcq->cq.info);
		qcq->cq.info = NULL;
	}
	if (qcq->q.info) {
		devm_kfree(dev, qcq->q.info);
		qcq->q.info = NULL;
	}
}

static void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_qcq *adminqcq;
	unsigned long irqflags;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		devm_kfree(dev, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		spin_lock_irqsave(&lif->adminq_lock, irqflags);
		adminqcq = READ_ONCE(lif->adminqcq);
		lif->adminqcq = NULL;
		spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
		if (adminqcq) {
			ionic_qcq_free(lif, adminqcq);
			devm_kfree(dev, adminqcq);
		}
	}

	if (lif->rxqcqs) {
		devm_kfree(dev, lif->rxqstats);
		lif->rxqstats = NULL;
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		devm_kfree(dev, lif->txqstats);
		lif->txqstats = NULL;
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}

static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
		ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
	}

	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
}
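
/* ionic_link_qcq_interrupts() is how one vector is shared between queues:
 * the notifyq rides on the adminq's interrupt, and in the default
 * (non split-interrupt) mode each txq rides on its partner rxq's vector.
 * The borrowing qcq deliberately does not carry IONIC_QCQ_F_INTR, so it
 * will never try to free the borrowed vector.
 */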
static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	int err;

	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	err = ionic_intr_alloc(lif, &qcq->intr);
	if (err) {
		netdev_warn(lif->netdev, "no intr for %s: %d\n",
			    qcq->q.name, err);
		goto err_out;
	}

	err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
	if (err < 0) {
		netdev_warn(lif->netdev, "no vector for %s: %d\n",
			    qcq->q.name, err);
		goto err_out_free_intr;
	}
	qcq->intr.vector = err;
	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
			       IONIC_INTR_MASK_SET);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "irq request failed %d\n", err);
		goto err_out_free_intr;
	}

	/* try to get the irq on the local numa node first */
	qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
					     dev_to_node(lif->ionic->dev));
	if (qcq->intr.cpu != -1)
		cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);

	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
	return 0;

err_out_free_intr:
	ionic_intr_free(lif->ionic, qcq->intr.index);
err_out:
	return err;
}
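
/* ionic_qcq_alloc() below sizes each DMA ring as PAGE_SIZE plus the ring
 * bytes: the extra page is slack so PTR_ALIGN()/ALIGN() can round the
 * cpu and bus addresses up to a page boundary before mapping.  The
 * notifyq additionally needs its q and cq rings contiguous, so those
 * are carved out of a single allocation.
 */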
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int pid, struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	void *q_base, *cq_base, *sg_base;
	dma_addr_t cq_base_pa = 0;
	dma_addr_t sg_base_pa = 0;
	dma_addr_t q_base_pa = 0;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->q.dev = dev;
	new->flags = flags;

	new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
				   GFP_KERNEL);
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out_free_qcq;
	}

	new->q.type = type;
	new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out_free_q_info;
	}

	err = ionic_alloc_qcq_interrupt(lif, new);
	if (err)
		goto err_out;

	new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
				    GFP_KERNEL);
	if (!new->cq.info) {
		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_cq_info;
	}

	if (flags & IONIC_QCQ_F_NOTIFYQ) {
		int q_size, cq_size;

		/* q & cq need to be contiguous in case of notifyq */
		q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
		cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);

		new->q_size = PAGE_SIZE + q_size + cq_size;
		new->q_base = dma_alloc_coherent(dev, new->q_size,
						 &new->q_base_pa, GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
		cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	} else {
		new->q_size = PAGE_SIZE + (num_descs * desc_size);
		new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
						 GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
		new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
						  GFP_KERNEL);
		if (!new->cq_base) {
			netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_q;
		}
		cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
		cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	}

	if (flags & IONIC_QCQ_F_SG) {
		new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
		new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
						  GFP_KERNEL);
		if (!new->sg_base) {
			netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq;
		}
		sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
		sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	INIT_WORK(&new->dim.work, ionic_dim_work);
	new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;

	*qcq = new;

	return 0;

err_out_free_cq:
	dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
	dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
	devm_kfree(dev, new->cq.info);
err_out_free_irq:
	if (flags & IONIC_QCQ_F_INTR) {
		devm_free_irq(dev, new->intr.vector, &new->napi);
		ionic_intr_free(lif->ionic, new->intr.index);
	}
err_out_free_q_info:
	devm_kfree(dev, new->q.info);
err_out_free_qcq:
	devm_kfree(dev, new);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int flags;
	int err;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0, lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0, lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	err = -ENOMEM;
	lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				   sizeof(*lif->txqcqs), GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out;
	lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				   sizeof(*lif->rxqcqs), GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out;

	lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
				     sizeof(*lif->txqstats), GFP_KERNEL);
	if (!lif->txqstats)
		goto err_out;
	lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
				     sizeof(*lif->rxqstats), GFP_KERNEL);
	if (!lif->rxqstats)
		goto err_out;

	return 0;

err_out:
	ionic_qcqs_free(lif);
	return err;
}
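
/* The qstats arrays above are allocated one slot larger than the queue
 * counts, apparently to leave room for the hwstamp queues' stats.
 * ionic_qcq_sanitize() below resets the software indices and zeroes the
 * rings before a queue is (re)initialized; done_color starts at 1 so the
 * first pass of device-written completions is recognized against the
 * freshly zeroed ring.
 */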
static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;
	memset(qcq->q_base, 0, qcq->q_size);
	memset(qcq->cq_base, 0, qcq->cq_size);
	memset(qcq->sg_base, 0, qcq->sg_size);
}

static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	unsigned int intr_index;
	int err;

	intr_index = qcq->intr.index;

	ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
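
/* The Q_INIT completion hands back the device's hw_type and hw_index for
 * the queue; hw_index is folded into q->dbval so later doorbell writes
 * ring the right hardware queue.  The same pattern repeats for rx below.
 */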
static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
			       NAPI_POLL_WEIGHT);
	else
		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int txq_i, flags;
	struct ionic_qcq *txq;
	u64 features;
	int err;

	mutex_lock(&lif->queue_lock);

	if (lif->hwstamp_txq)
		goto out;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = 2 * sizeof(struct ionic_txq_comp);

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	txq_i = lif->ionic->ntxqs_per_lif;
	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &txq);
	if (err)
		goto err_qcq_alloc;

	txq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, txq);
	ionic_debugfs_add_qcq(lif, txq);

	lif->hwstamp_txq = txq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_txq_init(lif, txq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			err = ionic_qcq_enable(txq);
			if (err)
				goto err_qcq_enable;
		}
	}

out:
	mutex_unlock(&lif->queue_lock);

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, txq);
err_qcq_init:
	lif->hwstamp_txq = NULL;
	ionic_debugfs_del_qcq(txq);
	ionic_qcq_free(lif, txq);
	devm_kfree(lif->ionic->dev, txq);
err_qcq_alloc:
	mutex_unlock(&lif->queue_lock);
	return err;
}
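
/* The hwstamp queues live outside the normal queue arrays: they take the
 * index just past ntxqs/nrxqs_per_lif, ride the adminq interrupt, and use
 * IONIC_Q_F_2X_CQ_DESC (with comp_sz doubled to match) so each completion
 * has room to carry a hardware timestamp.
 */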
int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int rxq_i, flags;
	struct ionic_qcq *rxq;
	u64 features;
	int err;

	mutex_lock(&lif->queue_lock);

	if (lif->hwstamp_rxq)
		goto out;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_rxq_desc);
	comp_sz = 2 * sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	rxq_i = lif->ionic->nrxqs_per_lif;
	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &rxq);
	if (err)
		goto err_qcq_alloc;

	rxq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, rxq);
	ionic_debugfs_add_qcq(lif, rxq);

	lif->hwstamp_rxq = rxq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_rxq_init(lif, rxq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			ionic_rx_fill(&rxq->q);
			err = ionic_qcq_enable(rxq);
			if (err)
				goto err_qcq_enable;
		}
	}

out:
	mutex_unlock(&lif->queue_lock);

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, rxq);
err_qcq_init:
	lif->hwstamp_rxq = NULL;
	ionic_debugfs_del_qcq(rxq);
	ionic_qcq_free(lif, rxq);
	devm_kfree(lif->ionic->dev, rxq);
err_qcq_alloc:
	mutex_unlock(&lif->queue_lock);
	return err;
}

int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
{
	struct ionic_queue_params qparam;

	ionic_init_queue_params(lif, &qparam);

	if (rx_all)
		qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
	else
		qparam.rxq_features = 0;

	/* if we're not running, just set the values and return */
	if (!netif_running(lif->netdev)) {
		lif->rxq_features = qparam.rxq_features;
		return 0;
	}

	return ionic_reconfigure_queues(lif, &qparam);
}

int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_TXSTAMP,
			.txstamp_mode = cpu_to_le16(txstamp_mode),
		},
	};

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	u32 filter_id;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_rxsteer(lif);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return;
	}

	filter_id = f->filter_id;
	ionic_rx_filter_free(lif, f);

	spin_unlock_bh(&lif->rx_filters.lock);

	netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
}
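
/* PTP steering: the RXSTEER filter added below matches on packet class
 * (IONIC_RX_FILTER_STEER_PKTCLASS) and directs matching packets to the
 * dedicated hwstamp_rxq rather than the regular RSS-spread rx queues.
 */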
static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
			.pkt_class = cpu_to_le64(pkt_class),
		},
	};
	u8 qtype;
	u32 qid;
	int err;

	if (!lif->hwstamp_rxq)
		return -EINVAL;

	qtype = lif->hwstamp_rxq->q.type;
	ctx.cmd.rx_filter_add.qtype = qtype;

	qid = lif->hwstamp_rxq->q.index;
	ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);

	netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	spin_lock_bh(&lif->rx_filters.lock);
	err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED);
	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}

int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	ionic_lif_del_hwstamp_rxfilt(lif);

	if (!pkt_class)
		return 0;

	return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
}

static bool ionic_notifyq_service(struct ionic_cq *cq,
				  struct ionic_cq_info *cq_info)
{
	union ionic_notifyq_comp *comp = cq_info->cq_desc;
	struct ionic_deferred_work *work;
	struct net_device *netdev;
	struct ionic_queue *q;
	struct ionic_lif *lif;
	u64 eid;

	q = cq->bound_q;
	lif = q->info[0].cb_arg;
	netdev = lif->netdev;
	eid = le64_to_cpu(comp->event.eid);

	/* Have we run out of new completions to process? */
	if ((s64)(eid - lif->last_eid) <= 0)
		return false;

	lif->last_eid = eid;

	dev_dbg(lif->ionic->dev, "notifyq event:\n");
	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	switch (le16_to_cpu(comp->event.ecode)) {
	case IONIC_EVENT_LINK_CHANGE:
		ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
		break;
	case IONIC_EVENT_RESET:
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "Reset event dropped\n");
		} else {
			work->type = IONIC_DW_TYPE_LIF_RESET;
			ionic_lif_deferred_enqueue(&lif->deferred, work);
		}
		break;
	default:
		netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
			    comp->event.ecode, eid);
		break;
	}

	return true;
}

static bool ionic_adminq_service(struct ionic_cq *cq,
				 struct ionic_cq_info *cq_info)
{
	struct ionic_admin_comp *comp = cq_info->cq_desc;

	if (!color_match(comp->color, cq->done_color))
		return false;

	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));

	return true;
}
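
/* ionic_adminq_napi() below polls everything that shares the adminq
 * interrupt: notifyq, adminq, and (when present) the hwstamp tx/rx
 * queues.  work_done is the max of the four, keeping budget accounting
 * per-queue, while the credits returned to the device are the sum.
 */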
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	struct ionic_dev *idev = &lif->ionic->idev;
	unsigned long irqflags;
	unsigned int flags = 0;
	int rx_work = 0;
	int tx_work = 0;
	int n_work = 0;
	int a_work = 0;
	int work_done;
	int credits;

	if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
		n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
					  ionic_notifyq_service, NULL, NULL);

	spin_lock_irqsave(&lif->adminq_lock, irqflags);
	if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
		a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
					  ionic_adminq_service, NULL, NULL);
	spin_unlock_irqrestore(&lif->adminq_lock, irqflags);

	if (lif->hwstamp_rxq)
		rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
					   ionic_rx_service, NULL, NULL);

	if (lif->hwstamp_txq)
		tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
					   ionic_tx_service, NULL, NULL);

	work_done = max(max(n_work, a_work), max(rx_work, tx_work));
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		credits = n_work + a_work + rx_work + tx_work;
		ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
	}

	return work_done;
}

void ionic_get_stats64(struct net_device *netdev,
		       struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}
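
/* MAC filter adds and deletes can race with the background filter sync,
 * so each software filter carries a small state machine: NEW (not yet
 * pushed to the FW), SYNCED (pushed), and OLD (delete requested while an
 * add was in flight).  ionic_lif_addr_add() below marks entries SYNCED
 * up front to block parallel attempts, and drops back to NEW if the FW
 * write fails so a later sync can retry.
 */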
int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
		},
	};
	int nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
	bool mc = is_multicast_ether_addr(addr);
	struct ionic_rx_filter *f;
	int err = 0;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (f) {
		/* don't bother if we already have it and it is sync'd */
		if (f->state == IONIC_FILTER_STATE_SYNCED) {
			spin_unlock_bh(&lif->rx_filters.lock);
			return 0;
		}

		/* mark preemptively as sync'd to block any parallel attempts */
		f->state = IONIC_FILTER_STATE_SYNCED;
	} else {
		/* save as SYNCED to catch any DEL requests while processing */
		memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_SYNCED);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
	if (err)
		return err;

	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);

	/* Don't bother with the write to FW if we know there's no room,
	 * we can try again on the next sync attempt.
	 */
	if ((lif->nucast + lif->nmcast) >= nfilters)
		err = -ENOSPC;
	else
		err = ionic_adminq_post_wait(lif, &ctx);

	spin_lock_bh(&lif->rx_filters.lock);
	if (err && err != -EEXIST) {
		/* set the state back to NEW so we can try again later */
		f = ionic_rx_filter_by_addr(lif, addr);
		if (f && f->state == IONIC_FILTER_STATE_SYNCED)
			f->state = IONIC_FILTER_STATE_NEW;

		spin_unlock_bh(&lif->rx_filters.lock);

		if (err == -ENOSPC)
			return 0;
		else
			return err;
	}

	if (mc)
		lif->nmcast++;
	else
		lif->nucast++;

	f = ionic_rx_filter_by_addr(lif, addr);
	if (f && f->state == IONIC_FILTER_STATE_OLD) {
		/* Someone requested a delete while we were adding
		 * so update the filter info with the results from the add
		 * and the data will be there for the delete on the next
		 * sync cycle.
		 */
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_OLD);
	} else {
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_SYNCED);
	}

	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}

int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int state;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
		   addr, f->filter_id);

	state = f->state;
	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);

	if (is_multicast_ether_addr(addr) && lif->nmcast)
		lif->nmcast--;
	else if (!is_multicast_ether_addr(addr) && lif->nucast)
		lif->nucast--;

	spin_unlock_bh(&lif->rx_filters.lock);

	if (state != IONIC_FILTER_STATE_NEW) {
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err && err != -EEXIST)
			return err;
	}

	return 0;
}

static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
}

static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
}
void ionic_lif_rx_mode(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	unsigned int nfilters;
	unsigned int nd_flags;
	char buf[128];
	u16 rx_mode;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	mutex_lock(&lif->config_lock);

	/* grab the flags once for local use */
	nd_flags = netdev->flags;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync the mac filters */
	ionic_rx_filter_sync(lif);

	/* check for overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
	if ((lif->nucast + lif->nmcast) >= nfilters) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
		lif->uc_overflow = true;
		lif->mc_overflow = true;
	} else if (lif->uc_overflow) {
		lif->uc_overflow = false;
		lif->mc_overflow = false;
		if (!(nd_flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
		if (!(nd_flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
	netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);

	if (lif->rx_mode != rx_mode) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_mode_set = {
				.opcode = IONIC_CMD_RX_MODE_SET,
				.lif_index = cpu_to_le16(lif->index),
			},
		};
		int err;

		ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err)
			netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
				    rx_mode, err);
		else
			lif->rx_mode = rx_mode;
	}

	mutex_unlock(&lif->config_lock);
}
static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_deferred_work *work;

	/* Sync the kernel filter list with the driver filter list */
	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);

	/* Shove off the rest of the rxmode work to the work task
	 * which will include syncing the filters to the firmware.
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		netdev_err(lif->netdev, "rxmode change dropped\n");
		return;
	}
	work->type = IONIC_DW_TYPE_RX_MODE;
	netdev_dbg(lif->netdev, "deferred: rx_mode\n");
	ionic_lif_deferred_enqueue(&lif->deferred, work);
}

static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}
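
/* Feature negotiation in ionic_set_nic_features() below is a simple
 * request/response mask: the driver sends the netdev features translated
 * into IONIC_ETH_HW_* bits, and keeps as lif->hw_features only the bits
 * that survive ANDing the request with the FW's completion reply.
 */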
static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);

	if (lif->phc)
		ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & features) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
		dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");

	return 0;
}
static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	if (lif->nxqs > 1)
		features |= NETIF_F_RXHASH;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_lif_addr_add(netdev_priv(netdev), mac);
}

static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
	/* Stop and clean the queues before reconfiguration */
	mutex_lock(&lif->queue_lock);
	netif_device_detach(lif->netdev);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
}

static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
	int err;

	/* Re-init the queues after reconfiguration */

	/* The only way txrx_init can fail here is if communication
	 * with FW is suddenly broken.  There's not much we can do
	 * at this point - error messages have already been printed,
	 * so we can continue on and the user can eventually do a
	 * DOWN and UP to try to reset and clear the issue.
	 */
	err = ionic_txrx_init(lif);
	mutex_unlock(&lif->queue_lock);
	ionic_link_status_check_request(lif, CAN_SLEEP);
	netif_device_attach(lif->netdev);

	return err;
}

static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* if we're not running, nothing more to do */
	if (!netif_running(netdev)) {
		netdev->mtu = new_mtu;
		return 0;
	}

	ionic_stop_queues_reconfig(lif);
	netdev->mtu = new_mtu;
	return ionic_start_queues_reconfig(lif);
}
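
/* ionic_stop_queues_reconfig() and ionic_start_queues_reconfig() above
 * are an asymmetric pair by design: the stop side takes lif->queue_lock
 * and leaves it held, and the start side re-inits the queues and then
 * releases it.  The MTU change above and the tx timeout recovery below
 * bracket their work with these two calls.
 */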
static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	/* if we were stopped before this scheduled job was launched,
	 * don't bother the queues as they are already stopped.
	 */
	if (!netif_running(lif->netdev))
		return;

	ionic_stop_queues_reconfig(lif);
	ionic_start_queues_reconfig(lif);
}

static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
	schedule_work(&lif->tx_timeout_work);
}

static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
			.vlan.vlan = cpu_to_le16(vid),
		},
	};
	int err;

	netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	spin_lock_bh(&lif->rx_filters.lock);
	err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
				   IONIC_FILTER_STATE_SYNCED);
	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}

static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_vlan(lif, vid);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
		   vid, f->filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	return ionic_adminq_post_wait(lif, &ctx);
}

int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}
le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1908 for (i = 0; i < tbl_sz; i++) 1909 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs); 1910 1911 return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); 1912 } 1913 1914 static void ionic_lif_rss_deinit(struct ionic_lif *lif) 1915 { 1916 int tbl_sz; 1917 1918 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1919 memset(lif->rss_ind_tbl, 0, tbl_sz); 1920 memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE); 1921 1922 ionic_lif_rss_config(lif, 0x0, NULL, NULL); 1923 } 1924 1925 static void ionic_lif_quiesce(struct ionic_lif *lif) 1926 { 1927 struct ionic_admin_ctx ctx = { 1928 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1929 .cmd.lif_setattr = { 1930 .opcode = IONIC_CMD_LIF_SETATTR, 1931 .index = cpu_to_le16(lif->index), 1932 .attr = IONIC_LIF_ATTR_STATE, 1933 .state = IONIC_LIF_QUIESCE, 1934 }, 1935 }; 1936 int err; 1937 1938 err = ionic_adminq_post_wait(lif, &ctx); 1939 if (err) 1940 netdev_err(lif->netdev, "lif quiesce failed %d\n", err); 1941 } 1942 1943 static void ionic_txrx_disable(struct ionic_lif *lif) 1944 { 1945 unsigned int i; 1946 int err = 0; 1947 1948 if (lif->txqcqs) { 1949 for (i = 0; i < lif->nxqs; i++) 1950 err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT)); 1951 } 1952 1953 if (lif->hwstamp_txq) 1954 err = ionic_qcq_disable(lif->hwstamp_txq, (err != -ETIMEDOUT)); 1955 1956 if (lif->rxqcqs) { 1957 for (i = 0; i < lif->nxqs; i++) 1958 err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT)); 1959 } 1960 1961 if (lif->hwstamp_rxq) 1962 err = ionic_qcq_disable(lif->hwstamp_rxq, (err != -ETIMEDOUT)); 1963 1964 ionic_lif_quiesce(lif); 1965 } 1966 1967 static void ionic_txrx_deinit(struct ionic_lif *lif) 1968 { 1969 unsigned int i; 1970 1971 if (lif->txqcqs) { 1972 for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) { 1973 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 1974 ionic_tx_flush(&lif->txqcqs[i]->cq); 1975 ionic_tx_empty(&lif->txqcqs[i]->q); 1976 } 1977 } 1978 1979 if (lif->rxqcqs) { 1980 for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) { 1981 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); 1982 ionic_rx_empty(&lif->rxqcqs[i]->q); 1983 } 1984 } 1985 lif->rx_mode = 0; 1986 1987 if (lif->hwstamp_txq) { 1988 ionic_lif_qcq_deinit(lif, lif->hwstamp_txq); 1989 ionic_tx_flush(&lif->hwstamp_txq->cq); 1990 ionic_tx_empty(&lif->hwstamp_txq->q); 1991 } 1992 1993 if (lif->hwstamp_rxq) { 1994 ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq); 1995 ionic_rx_empty(&lif->hwstamp_rxq->q); 1996 } 1997 } 1998 1999 static void ionic_txrx_free(struct ionic_lif *lif) 2000 { 2001 unsigned int i; 2002 2003 if (lif->txqcqs) { 2004 for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) { 2005 ionic_qcq_free(lif, lif->txqcqs[i]); 2006 devm_kfree(lif->ionic->dev, lif->txqcqs[i]); 2007 lif->txqcqs[i] = NULL; 2008 } 2009 } 2010 2011 if (lif->rxqcqs) { 2012 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { 2013 ionic_qcq_free(lif, lif->rxqcqs[i]); 2014 devm_kfree(lif->ionic->dev, lif->rxqcqs[i]); 2015 lif->rxqcqs[i] = NULL; 2016 } 2017 } 2018 2019 if (lif->hwstamp_txq) { 2020 ionic_qcq_free(lif, lif->hwstamp_txq); 2021 devm_kfree(lif->ionic->dev, lif->hwstamp_txq); 2022 lif->hwstamp_txq = NULL; 2023 } 2024 2025 if (lif->hwstamp_rxq) { 2026 ionic_qcq_free(lif, lif->hwstamp_rxq); 2027 devm_kfree(lif->ionic->dev, lif->hwstamp_rxq); 2028 lif->hwstamp_rxq = NULL; 2029 } 2030 } 2031 2032 static int ionic_txrx_alloc(struct ionic_lif *lif) 2033 { 2034 unsigned int comp_sz, desc_sz, 
num_desc, sg_desc_sz; 2035 unsigned int flags, i; 2036 int err = 0; 2037 2038 num_desc = lif->ntxq_descs; 2039 desc_sz = sizeof(struct ionic_txq_desc); 2040 comp_sz = sizeof(struct ionic_txq_comp); 2041 2042 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 2043 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == 2044 sizeof(struct ionic_txq_sg_desc_v1)) 2045 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 2046 else 2047 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 2048 2049 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; 2050 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 2051 flags |= IONIC_QCQ_F_INTR; 2052 for (i = 0; i < lif->nxqs; i++) { 2053 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 2054 num_desc, desc_sz, comp_sz, sg_desc_sz, 2055 lif->kern_pid, &lif->txqcqs[i]); 2056 if (err) 2057 goto err_out; 2058 2059 if (flags & IONIC_QCQ_F_INTR) { 2060 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2061 lif->txqcqs[i]->intr.index, 2062 lif->tx_coalesce_hw); 2063 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 2064 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 2065 } 2066 2067 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 2068 } 2069 2070 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR; 2071 2072 num_desc = lif->nrxq_descs; 2073 desc_sz = sizeof(struct ionic_rxq_desc); 2074 comp_sz = sizeof(struct ionic_rxq_comp); 2075 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); 2076 2077 if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC) 2078 comp_sz *= 2; 2079 2080 for (i = 0; i < lif->nxqs; i++) { 2081 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 2082 num_desc, desc_sz, comp_sz, sg_desc_sz, 2083 lif->kern_pid, &lif->rxqcqs[i]); 2084 if (err) 2085 goto err_out; 2086 2087 lif->rxqcqs[i]->q.features = lif->rxq_features; 2088 2089 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2090 lif->rxqcqs[i]->intr.index, 2091 lif->rx_coalesce_hw); 2092 if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state)) 2093 lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw; 2094 2095 if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 2096 ionic_link_qcq_interrupts(lif->rxqcqs[i], 2097 lif->txqcqs[i]); 2098 2099 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 2100 } 2101 2102 return 0; 2103 2104 err_out: 2105 ionic_txrx_free(lif); 2106 2107 return err; 2108 } 2109 2110 static int ionic_txrx_init(struct ionic_lif *lif) 2111 { 2112 unsigned int i; 2113 int err; 2114 2115 for (i = 0; i < lif->nxqs; i++) { 2116 err = ionic_lif_txq_init(lif, lif->txqcqs[i]); 2117 if (err) 2118 goto err_out; 2119 2120 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]); 2121 if (err) { 2122 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 2123 goto err_out; 2124 } 2125 } 2126 2127 if (lif->netdev->features & NETIF_F_RXHASH) 2128 ionic_lif_rss_init(lif); 2129 2130 ionic_lif_rx_mode(lif); 2131 2132 return 0; 2133 2134 err_out: 2135 while (i--) { 2136 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 2137 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); 2138 } 2139 2140 return err; 2141 } 2142 2143 static int ionic_txrx_enable(struct ionic_lif *lif) 2144 { 2145 int derr = 0; 2146 int i, err; 2147 2148 for (i = 0; i < lif->nxqs; i++) { 2149 if (!(lif->rxqcqs[i] && lif->txqcqs[i])) { 2150 dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i); 2151 err = -ENXIO; 2152 goto err_out; 2153 } 2154 2155 ionic_rx_fill(&lif->rxqcqs[i]->q); 2156 err = ionic_qcq_enable(lif->rxqcqs[i]); 2157 if (err) 2158 goto err_out; 2159 2160 err = ionic_qcq_enable(lif->txqcqs[i]); 2161 if (err) { 2162 derr = ionic_qcq_disable(lif->rxqcqs[i], (err 
!= -ETIMEDOUT)); 2163 goto err_out; 2164 } 2165 } 2166 2167 if (lif->hwstamp_rxq) { 2168 ionic_rx_fill(&lif->hwstamp_rxq->q); 2169 err = ionic_qcq_enable(lif->hwstamp_rxq); 2170 if (err) 2171 goto err_out_hwstamp_rx; 2172 } 2173 2174 if (lif->hwstamp_txq) { 2175 err = ionic_qcq_enable(lif->hwstamp_txq); 2176 if (err) 2177 goto err_out_hwstamp_tx; 2178 } 2179 2180 return 0; 2181 2182 err_out_hwstamp_tx: 2183 if (lif->hwstamp_rxq) 2184 derr = ionic_qcq_disable(lif->hwstamp_rxq, (derr != -ETIMEDOUT)); 2185 err_out_hwstamp_rx: 2186 i = lif->nxqs; 2187 err_out: 2188 while (i--) { 2189 derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT)); 2190 derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT)); 2191 } 2192 2193 return err; 2194 } 2195 2196 static int ionic_start_queues(struct ionic_lif *lif) 2197 { 2198 int err; 2199 2200 if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) 2201 return -EIO; 2202 2203 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2204 return -EBUSY; 2205 2206 if (test_and_set_bit(IONIC_LIF_F_UP, lif->state)) 2207 return 0; 2208 2209 err = ionic_txrx_enable(lif); 2210 if (err) { 2211 clear_bit(IONIC_LIF_F_UP, lif->state); 2212 return err; 2213 } 2214 netif_tx_wake_all_queues(lif->netdev); 2215 2216 return 0; 2217 } 2218 2219 static int ionic_open(struct net_device *netdev) 2220 { 2221 struct ionic_lif *lif = netdev_priv(netdev); 2222 int err; 2223 2224 /* If recovering from a broken state, clear the bit and we'll try again */ 2225 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) 2226 netdev_info(netdev, "clearing broken state\n"); 2227 2228 err = ionic_txrx_alloc(lif); 2229 if (err) 2230 return err; 2231 2232 err = ionic_txrx_init(lif); 2233 if (err) 2234 goto err_txrx_free; 2235 2236 err = netif_set_real_num_tx_queues(netdev, lif->nxqs); 2237 if (err) 2238 goto err_txrx_deinit; 2239 2240 err = netif_set_real_num_rx_queues(netdev, lif->nxqs); 2241 if (err) 2242 goto err_txrx_deinit; 2243 2244 /* don't start the queues until we have link */ 2245 if (netif_carrier_ok(netdev)) { 2246 err = ionic_start_queues(lif); 2247 if (err) 2248 goto err_txrx_deinit; 2249 } 2250 2251 return 0; 2252 2253 err_txrx_deinit: 2254 ionic_txrx_deinit(lif); 2255 err_txrx_free: 2256 ionic_txrx_free(lif); 2257 return err; 2258 } 2259 2260 static void ionic_stop_queues(struct ionic_lif *lif) 2261 { 2262 if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state)) 2263 return; 2264 2265 netif_tx_disable(lif->netdev); 2266 ionic_txrx_disable(lif); 2267 } 2268 2269 static int ionic_stop(struct net_device *netdev) 2270 { 2271 struct ionic_lif *lif = netdev_priv(netdev); 2272 2273 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2274 return 0; 2275 2276 ionic_stop_queues(lif); 2277 ionic_txrx_deinit(lif); 2278 ionic_txrx_free(lif); 2279 2280 return 0; 2281 } 2282 2283 static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2284 { 2285 struct ionic_lif *lif = netdev_priv(netdev); 2286 2287 switch (cmd) { 2288 case SIOCSHWTSTAMP: 2289 return ionic_lif_hwstamp_set(lif, ifr); 2290 case SIOCGHWTSTAMP: 2291 return ionic_lif_hwstamp_get(lif, ifr); 2292 default: 2293 return -EOPNOTSUPP; 2294 } 2295 } 2296 2297 static int ionic_get_vf_config(struct net_device *netdev, 2298 int vf, struct ifla_vf_info *ivf) 2299 { 2300 struct ionic_lif *lif = netdev_priv(netdev); 2301 struct ionic *ionic = lif->ionic; 2302 int ret = 0; 2303 2304 if (!netif_device_present(netdev)) 2305 return -EBUSY; 2306 2307 down_read(&ionic->vf_op_lock); 2308 2309 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) 
{ 2310 ret = -EINVAL; 2311 } else { 2312 ivf->vf = vf; 2313 ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid); 2314 ivf->qos = 0; 2315 ivf->spoofchk = ionic->vfs[vf].spoofchk; 2316 ivf->linkstate = ionic->vfs[vf].linkstate; 2317 ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate); 2318 ivf->trusted = ionic->vfs[vf].trusted; 2319 ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr); 2320 } 2321 2322 up_read(&ionic->vf_op_lock); 2323 return ret; 2324 } 2325 2326 static int ionic_get_vf_stats(struct net_device *netdev, int vf, 2327 struct ifla_vf_stats *vf_stats) 2328 { 2329 struct ionic_lif *lif = netdev_priv(netdev); 2330 struct ionic *ionic = lif->ionic; 2331 struct ionic_lif_stats *vs; 2332 int ret = 0; 2333 2334 if (!netif_device_present(netdev)) 2335 return -EBUSY; 2336 2337 down_read(&ionic->vf_op_lock); 2338 2339 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2340 ret = -EINVAL; 2341 } else { 2342 memset(vf_stats, 0, sizeof(*vf_stats)); 2343 vs = &ionic->vfs[vf].stats; 2344 2345 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets); 2346 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets); 2347 vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes); 2348 vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes); 2349 vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets); 2350 vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets); 2351 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) + 2352 le64_to_cpu(vs->rx_mcast_drop_packets) + 2353 le64_to_cpu(vs->rx_bcast_drop_packets); 2354 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) + 2355 le64_to_cpu(vs->tx_mcast_drop_packets) + 2356 le64_to_cpu(vs->tx_bcast_drop_packets); 2357 } 2358 2359 up_read(&ionic->vf_op_lock); 2360 return ret; 2361 } 2362 2363 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 2364 { 2365 struct ionic_lif *lif = netdev_priv(netdev); 2366 struct ionic *ionic = lif->ionic; 2367 int ret; 2368 2369 if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac))) 2370 return -EINVAL; 2371 2372 if (!netif_device_present(netdev)) 2373 return -EBUSY; 2374 2375 down_write(&ionic->vf_op_lock); 2376 2377 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2378 ret = -EINVAL; 2379 } else { 2380 ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac); 2381 if (!ret) 2382 ether_addr_copy(ionic->vfs[vf].macaddr, mac); 2383 } 2384 2385 up_write(&ionic->vf_op_lock); 2386 return ret; 2387 } 2388 2389 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 2390 u8 qos, __be16 proto) 2391 { 2392 struct ionic_lif *lif = netdev_priv(netdev); 2393 struct ionic *ionic = lif->ionic; 2394 int ret; 2395 2396 /* until someday when we support qos */ 2397 if (qos) 2398 return -EINVAL; 2399 2400 if (vlan > 4095) 2401 return -EINVAL; 2402 2403 if (proto != htons(ETH_P_8021Q)) 2404 return -EPROTONOSUPPORT; 2405 2406 if (!netif_device_present(netdev)) 2407 return -EBUSY; 2408 2409 down_write(&ionic->vf_op_lock); 2410 2411 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2412 ret = -EINVAL; 2413 } else { 2414 ret = ionic_set_vf_config(ionic, vf, 2415 IONIC_VF_ATTR_VLAN, (u8 *)&vlan); 2416 if (!ret) 2417 ionic->vfs[vf].vlanid = cpu_to_le16(vlan); 2418 } 2419 2420 up_write(&ionic->vf_op_lock); 2421 return ret; 2422 } 2423 2424 static int ionic_set_vf_rate(struct net_device *netdev, int vf, 2425 int tx_min, int tx_max) 2426 { 2427 struct ionic_lif *lif = netdev_priv(netdev); 2428 struct ionic *ionic = lif->ionic; 2429 int ret; 2430 2431 /* setting the min just seems silly */ 
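	/* only a maximum tx rate can be pushed to the device,
	 * via IONIC_VF_ATTR_RATE below
	 */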
2432 if (tx_min) 2433 return -EINVAL; 2434 2435 if (!netif_device_present(netdev)) 2436 return -EBUSY; 2437 2438 down_write(&ionic->vf_op_lock); 2439 2440 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2441 ret = -EINVAL; 2442 } else { 2443 ret = ionic_set_vf_config(ionic, vf, 2444 IONIC_VF_ATTR_RATE, (u8 *)&tx_max); 2445 if (!ret) 2446 lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max); 2447 } 2448 2449 up_write(&ionic->vf_op_lock); 2450 return ret; 2451 } 2452 2453 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set) 2454 { 2455 struct ionic_lif *lif = netdev_priv(netdev); 2456 struct ionic *ionic = lif->ionic; 2457 u8 data = set; /* convert to u8 for config */ 2458 int ret; 2459 2460 if (!netif_device_present(netdev)) 2461 return -EBUSY; 2462 2463 down_write(&ionic->vf_op_lock); 2464 2465 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2466 ret = -EINVAL; 2467 } else { 2468 ret = ionic_set_vf_config(ionic, vf, 2469 IONIC_VF_ATTR_SPOOFCHK, &data); 2470 if (!ret) 2471 ionic->vfs[vf].spoofchk = data; 2472 } 2473 2474 up_write(&ionic->vf_op_lock); 2475 return ret; 2476 } 2477 2478 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set) 2479 { 2480 struct ionic_lif *lif = netdev_priv(netdev); 2481 struct ionic *ionic = lif->ionic; 2482 u8 data = set; /* convert to u8 for config */ 2483 int ret; 2484 2485 if (!netif_device_present(netdev)) 2486 return -EBUSY; 2487 2488 down_write(&ionic->vf_op_lock); 2489 2490 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2491 ret = -EINVAL; 2492 } else { 2493 ret = ionic_set_vf_config(ionic, vf, 2494 IONIC_VF_ATTR_TRUST, &data); 2495 if (!ret) 2496 ionic->vfs[vf].trusted = data; 2497 } 2498 2499 up_write(&ionic->vf_op_lock); 2500 return ret; 2501 } 2502 2503 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set) 2504 { 2505 struct ionic_lif *lif = netdev_priv(netdev); 2506 struct ionic *ionic = lif->ionic; 2507 u8 data; 2508 int ret; 2509 2510 switch (set) { 2511 case IFLA_VF_LINK_STATE_ENABLE: 2512 data = IONIC_VF_LINK_STATUS_UP; 2513 break; 2514 case IFLA_VF_LINK_STATE_DISABLE: 2515 data = IONIC_VF_LINK_STATUS_DOWN; 2516 break; 2517 case IFLA_VF_LINK_STATE_AUTO: 2518 data = IONIC_VF_LINK_STATUS_AUTO; 2519 break; 2520 default: 2521 return -EINVAL; 2522 } 2523 2524 if (!netif_device_present(netdev)) 2525 return -EBUSY; 2526 2527 down_write(&ionic->vf_op_lock); 2528 2529 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2530 ret = -EINVAL; 2531 } else { 2532 ret = ionic_set_vf_config(ionic, vf, 2533 IONIC_VF_ATTR_LINKSTATE, &data); 2534 if (!ret) 2535 ionic->vfs[vf].linkstate = set; 2536 } 2537 2538 up_write(&ionic->vf_op_lock); 2539 return ret; 2540 } 2541 2542 static const struct net_device_ops ionic_netdev_ops = { 2543 .ndo_open = ionic_open, 2544 .ndo_stop = ionic_stop, 2545 .ndo_eth_ioctl = ionic_eth_ioctl, 2546 .ndo_start_xmit = ionic_start_xmit, 2547 .ndo_get_stats64 = ionic_get_stats64, 2548 .ndo_set_rx_mode = ionic_ndo_set_rx_mode, 2549 .ndo_set_features = ionic_set_features, 2550 .ndo_set_mac_address = ionic_set_mac_address, 2551 .ndo_validate_addr = eth_validate_addr, 2552 .ndo_tx_timeout = ionic_tx_timeout, 2553 .ndo_change_mtu = ionic_change_mtu, 2554 .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid, 2555 .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid, 2556 .ndo_set_vf_vlan = ionic_set_vf_vlan, 2557 .ndo_set_vf_trust = ionic_set_vf_trust, 2558 .ndo_set_vf_mac = ionic_set_vf_mac, 2559 .ndo_set_vf_rate = ionic_set_vf_rate, 2560 .ndo_set_vf_spoofchk = 
ionic_set_vf_spoofchk, 2561 .ndo_get_vf_config = ionic_get_vf_config, 2562 .ndo_set_vf_link_state = ionic_set_vf_link_state, 2563 .ndo_get_vf_stats = ionic_get_vf_stats, 2564 }; 2565 2566 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) 2567 { 2568 /* only swapping the queues, not the napi, flags, or other stuff */ 2569 swap(a->q.features, b->q.features); 2570 swap(a->q.num_descs, b->q.num_descs); 2571 swap(a->q.desc_size, b->q.desc_size); 2572 swap(a->q.base, b->q.base); 2573 swap(a->q.base_pa, b->q.base_pa); 2574 swap(a->q.info, b->q.info); 2575 swap(a->q_base, b->q_base); 2576 swap(a->q_base_pa, b->q_base_pa); 2577 swap(a->q_size, b->q_size); 2578 2579 swap(a->q.sg_desc_size, b->q.sg_desc_size); 2580 swap(a->q.sg_base, b->q.sg_base); 2581 swap(a->q.sg_base_pa, b->q.sg_base_pa); 2582 swap(a->sg_base, b->sg_base); 2583 swap(a->sg_base_pa, b->sg_base_pa); 2584 swap(a->sg_size, b->sg_size); 2585 2586 swap(a->cq.num_descs, b->cq.num_descs); 2587 swap(a->cq.desc_size, b->cq.desc_size); 2588 swap(a->cq.base, b->cq.base); 2589 swap(a->cq.base_pa, b->cq.base_pa); 2590 swap(a->cq.info, b->cq.info); 2591 swap(a->cq_base, b->cq_base); 2592 swap(a->cq_base_pa, b->cq_base_pa); 2593 swap(a->cq_size, b->cq_size); 2594 2595 ionic_debugfs_del_qcq(a); 2596 ionic_debugfs_add_qcq(a->q.lif, a); 2597 } 2598 2599 int ionic_reconfigure_queues(struct ionic_lif *lif, 2600 struct ionic_queue_params *qparam) 2601 { 2602 unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz; 2603 struct ionic_qcq **tx_qcqs = NULL; 2604 struct ionic_qcq **rx_qcqs = NULL; 2605 unsigned int flags, i; 2606 int err = 0; 2607 2608 /* allocate temporary qcq arrays to hold new queue structs */ 2609 if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) { 2610 tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif, 2611 sizeof(struct ionic_qcq *), GFP_KERNEL); 2612 if (!tx_qcqs) { 2613 err = -ENOMEM; 2614 goto err_out; 2615 } 2616 } 2617 if (qparam->nxqs != lif->nxqs || 2618 qparam->nrxq_descs != lif->nrxq_descs || 2619 qparam->rxq_features != lif->rxq_features) { 2620 rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif, 2621 sizeof(struct ionic_qcq *), GFP_KERNEL); 2622 if (!rx_qcqs) { 2623 err = -ENOMEM; 2624 goto err_out; 2625 } 2626 } 2627 2628 /* allocate new desc_info and rings, but leave the interrupt setup 2629 * until later so as to not mess with the still-running queues 2630 */ 2631 if (tx_qcqs) { 2632 num_desc = qparam->ntxq_descs; 2633 desc_sz = sizeof(struct ionic_txq_desc); 2634 comp_sz = sizeof(struct ionic_txq_comp); 2635 2636 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 2637 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == 2638 sizeof(struct ionic_txq_sg_desc_v1)) 2639 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 2640 else 2641 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 2642 2643 for (i = 0; i < qparam->nxqs; i++) { 2644 flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 2645 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 2646 num_desc, desc_sz, comp_sz, sg_desc_sz, 2647 lif->kern_pid, &tx_qcqs[i]); 2648 if (err) 2649 goto err_out; 2650 } 2651 } 2652 2653 if (rx_qcqs) { 2654 num_desc = qparam->nrxq_descs; 2655 desc_sz = sizeof(struct ionic_rxq_desc); 2656 comp_sz = sizeof(struct ionic_rxq_comp); 2657 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc); 2658 2659 if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC) 2660 comp_sz *= 2; 2661 2662 for (i = 0; i < qparam->nxqs; i++) { 2663 flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 2664 err = 
ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 2665 num_desc, desc_sz, comp_sz, sg_desc_sz, 2666 lif->kern_pid, &rx_qcqs[i]); 2667 if (err) 2668 goto err_out; 2669 2670 rx_qcqs[i]->q.features = qparam->rxq_features; 2671 } 2672 } 2673 2674 /* stop and clean the queues */ 2675 ionic_stop_queues_reconfig(lif); 2676 2677 if (qparam->nxqs != lif->nxqs) { 2678 err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs); 2679 if (err) 2680 goto err_out_reinit_unlock; 2681 err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs); 2682 if (err) { 2683 netif_set_real_num_tx_queues(lif->netdev, lif->nxqs); 2684 goto err_out_reinit_unlock; 2685 } 2686 } 2687 2688 /* swap new desc_info and rings, keeping existing interrupt config */ 2689 if (tx_qcqs) { 2690 lif->ntxq_descs = qparam->ntxq_descs; 2691 for (i = 0; i < qparam->nxqs; i++) 2692 ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]); 2693 } 2694 2695 if (rx_qcqs) { 2696 lif->nrxq_descs = qparam->nrxq_descs; 2697 for (i = 0; i < qparam->nxqs; i++) 2698 ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]); 2699 } 2700 2701 /* if we need to change the interrupt layout, this is the time */ 2702 if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) || 2703 qparam->nxqs != lif->nxqs) { 2704 if (qparam->intr_split) { 2705 set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); 2706 } else { 2707 clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); 2708 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 2709 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 2710 } 2711 2712 /* clear existing interrupt assignments */ 2713 for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) { 2714 ionic_qcq_intr_free(lif, lif->txqcqs[i]); 2715 ionic_qcq_intr_free(lif, lif->rxqcqs[i]); 2716 } 2717 2718 /* re-assign the interrupts */ 2719 for (i = 0; i < qparam->nxqs; i++) { 2720 lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR; 2721 err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]); 2722 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2723 lif->rxqcqs[i]->intr.index, 2724 lif->rx_coalesce_hw); 2725 2726 if (qparam->intr_split) { 2727 lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR; 2728 err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]); 2729 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2730 lif->txqcqs[i]->intr.index, 2731 lif->tx_coalesce_hw); 2732 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 2733 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 2734 } else { 2735 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 2736 ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]); 2737 } 2738 } 2739 } 2740 2741 /* now we can rework the debugfs mappings */ 2742 if (tx_qcqs) { 2743 for (i = 0; i < qparam->nxqs; i++) { 2744 ionic_debugfs_del_qcq(lif->txqcqs[i]); 2745 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 2746 } 2747 } 2748 2749 if (rx_qcqs) { 2750 for (i = 0; i < qparam->nxqs; i++) { 2751 ionic_debugfs_del_qcq(lif->rxqcqs[i]); 2752 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 2753 } 2754 } 2755 2756 swap(lif->nxqs, qparam->nxqs); 2757 swap(lif->rxq_features, qparam->rxq_features); 2758 2759 err_out_reinit_unlock: 2760 /* re-init the queues, but don't lose an error code */ 2761 if (err) 2762 ionic_start_queues_reconfig(lif); 2763 else 2764 err = ionic_start_queues_reconfig(lif); 2765 2766 err_out: 2767 /* free old allocs without cleaning intr */ 2768 for (i = 0; i < qparam->nxqs; i++) { 2769 if (tx_qcqs && tx_qcqs[i]) { 2770 tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 2771 ionic_qcq_free(lif, tx_qcqs[i]); 2772 devm_kfree(lif->ionic->dev, tx_qcqs[i]); 2773 tx_qcqs[i] = NULL; 2774 } 2775 
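		/* likewise free the rx side of the temporary qcq array */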
if (rx_qcqs && rx_qcqs[i]) { 2776 rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 2777 ionic_qcq_free(lif, rx_qcqs[i]); 2778 devm_kfree(lif->ionic->dev, rx_qcqs[i]); 2779 rx_qcqs[i] = NULL; 2780 } 2781 } 2782 2783 /* free q array */ 2784 if (rx_qcqs) { 2785 devm_kfree(lif->ionic->dev, rx_qcqs); 2786 rx_qcqs = NULL; 2787 } 2788 if (tx_qcqs) { 2789 devm_kfree(lif->ionic->dev, tx_qcqs); 2790 tx_qcqs = NULL; 2791 } 2792 2793 /* clean the unused dma and info allocations when new set is smaller 2794 * than the full array, but leave the qcq shells in place 2795 */ 2796 for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) { 2797 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 2798 ionic_qcq_free(lif, lif->txqcqs[i]); 2799 2800 lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 2801 ionic_qcq_free(lif, lif->rxqcqs[i]); 2802 } 2803 2804 if (err) 2805 netdev_info(lif->netdev, "%s: failed %d\n", __func__, err); 2806 2807 return err; 2808 } 2809 2810 int ionic_lif_alloc(struct ionic *ionic) 2811 { 2812 struct device *dev = ionic->dev; 2813 union ionic_lif_identity *lid; 2814 struct net_device *netdev; 2815 struct ionic_lif *lif; 2816 int tbl_sz; 2817 int err; 2818 2819 lid = kzalloc(sizeof(*lid), GFP_KERNEL); 2820 if (!lid) 2821 return -ENOMEM; 2822 2823 netdev = alloc_etherdev_mqs(sizeof(*lif), 2824 ionic->ntxqs_per_lif, ionic->ntxqs_per_lif); 2825 if (!netdev) { 2826 dev_err(dev, "Cannot allocate netdev, aborting\n"); 2827 err = -ENOMEM; 2828 goto err_out_free_lid; 2829 } 2830 2831 SET_NETDEV_DEV(netdev, dev); 2832 2833 lif = netdev_priv(netdev); 2834 lif->netdev = netdev; 2835 ionic->lif = lif; 2836 netdev->netdev_ops = &ionic_netdev_ops; 2837 ionic_ethtool_set_ops(netdev); 2838 2839 netdev->watchdog_timeo = 2 * HZ; 2840 netif_carrier_off(netdev); 2841 2842 lif->identity = lid; 2843 lif->lif_type = IONIC_LIF_TYPE_CLASSIC; 2844 err = ionic_lif_identify(ionic, lif->lif_type, lif->identity); 2845 if (err) { 2846 dev_err(ionic->dev, "Cannot identify type %d: %d\n", 2847 lif->lif_type, err); 2848 goto err_out_free_netdev; 2849 } 2850 lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU, 2851 le32_to_cpu(lif->identity->eth.min_frame_size)); 2852 lif->netdev->max_mtu = 2853 le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN; 2854 2855 lif->neqs = ionic->neqs_per_lif; 2856 lif->nxqs = ionic->ntxqs_per_lif; 2857 2858 lif->ionic = ionic; 2859 lif->index = 0; 2860 2861 if (is_kdump_kernel()) { 2862 lif->ntxq_descs = IONIC_MIN_TXRX_DESC; 2863 lif->nrxq_descs = IONIC_MIN_TXRX_DESC; 2864 } else { 2865 lif->ntxq_descs = IONIC_DEF_TXRX_DESC; 2866 lif->nrxq_descs = IONIC_DEF_TXRX_DESC; 2867 } 2868 2869 /* Convert the default coalesce value to actual hw resolution */ 2870 lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; 2871 lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic, 2872 lif->rx_coalesce_usecs); 2873 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 2874 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 2875 set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state); 2876 set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state); 2877 2878 snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index); 2879 2880 spin_lock_init(&lif->adminq_lock); 2881 2882 spin_lock_init(&lif->deferred.lock); 2883 INIT_LIST_HEAD(&lif->deferred.list); 2884 INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work); 2885 2886 /* allocate lif info */ 2887 lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE); 2888 lif->info = dma_alloc_coherent(dev, lif->info_sz, 2889 &lif->info_pa, GFP_KERNEL); 2890 if (!lif->info) { 2891 dev_err(dev, "Failed to allocate 
lif info, aborting\n"); 2892 err = -ENOMEM; 2893 goto err_out_free_netdev; 2894 } 2895 2896 ionic_debugfs_add_lif(lif); 2897 2898 /* allocate control queues and txrx queue arrays */ 2899 ionic_lif_queue_identify(lif); 2900 err = ionic_qcqs_alloc(lif); 2901 if (err) 2902 goto err_out_free_lif_info; 2903 2904 /* allocate rss indirection table */ 2905 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 2906 lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz; 2907 lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz, 2908 &lif->rss_ind_tbl_pa, 2909 GFP_KERNEL); 2910 2911 if (!lif->rss_ind_tbl) { 2912 err = -ENOMEM; 2913 dev_err(dev, "Failed to allocate rss indirection table, aborting\n"); 2914 goto err_out_free_qcqs; 2915 } 2916 netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); 2917 2918 ionic_lif_alloc_phc(lif); 2919 2920 return 0; 2921 2922 err_out_free_qcqs: 2923 ionic_qcqs_free(lif); 2924 err_out_free_lif_info: 2925 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 2926 lif->info = NULL; 2927 lif->info_pa = 0; 2928 err_out_free_netdev: 2929 free_netdev(lif->netdev); 2930 lif = NULL; 2931 err_out_free_lid: 2932 kfree(lid); 2933 2934 return err; 2935 } 2936 2937 static void ionic_lif_reset(struct ionic_lif *lif) 2938 { 2939 struct ionic_dev *idev = &lif->ionic->idev; 2940 2941 mutex_lock(&lif->ionic->dev_cmd_lock); 2942 ionic_dev_cmd_lif_reset(idev, lif->index); 2943 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 2944 mutex_unlock(&lif->ionic->dev_cmd_lock); 2945 } 2946 2947 static void ionic_lif_handle_fw_down(struct ionic_lif *lif) 2948 { 2949 struct ionic *ionic = lif->ionic; 2950 2951 if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2952 return; 2953 2954 dev_info(ionic->dev, "FW Down: Stopping LIFs\n"); 2955 2956 netif_device_detach(lif->netdev); 2957 2958 if (test_bit(IONIC_LIF_F_UP, lif->state)) { 2959 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); 2960 mutex_lock(&lif->queue_lock); 2961 ionic_stop_queues(lif); 2962 mutex_unlock(&lif->queue_lock); 2963 } 2964 2965 if (netif_running(lif->netdev)) { 2966 ionic_txrx_deinit(lif); 2967 ionic_txrx_free(lif); 2968 } 2969 ionic_lif_deinit(lif); 2970 ionic_reset(ionic); 2971 ionic_qcqs_free(lif); 2972 2973 dev_info(ionic->dev, "FW Down: LIFs stopped\n"); 2974 } 2975 2976 static void ionic_lif_handle_fw_up(struct ionic_lif *lif) 2977 { 2978 struct ionic *ionic = lif->ionic; 2979 int err; 2980 2981 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2982 return; 2983 2984 dev_info(ionic->dev, "FW Up: restarting LIFs\n"); 2985 2986 ionic_init_devinfo(ionic); 2987 err = ionic_identify(ionic); 2988 if (err) 2989 goto err_out; 2990 err = ionic_port_identify(ionic); 2991 if (err) 2992 goto err_out; 2993 err = ionic_port_init(ionic); 2994 if (err) 2995 goto err_out; 2996 err = ionic_qcqs_alloc(lif); 2997 if (err) 2998 goto err_out; 2999 3000 err = ionic_lif_init(lif); 3001 if (err) 3002 goto err_qcqs_free; 3003 3004 if (lif->registered) 3005 ionic_lif_set_netdev_info(lif); 3006 3007 ionic_rx_filter_replay(lif); 3008 3009 if (netif_running(lif->netdev)) { 3010 err = ionic_txrx_alloc(lif); 3011 if (err) 3012 goto err_lifs_deinit; 3013 3014 err = ionic_txrx_init(lif); 3015 if (err) 3016 goto err_txrx_free; 3017 } 3018 3019 clear_bit(IONIC_LIF_F_FW_RESET, lif->state); 3020 ionic_link_status_check_request(lif, CAN_SLEEP); 3021 netif_device_attach(lif->netdev); 3022 dev_info(ionic->dev, "FW Up: LIFs restarted\n"); 3023 3024 /* restore the hardware timestamping queues */ 3025 
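	/* (left until last, after the LIF is back up and attached) */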
ionic_lif_hwstamp_replay(lif); 3026 3027 return; 3028 3029 err_txrx_free: 3030 ionic_txrx_free(lif); 3031 err_lifs_deinit: 3032 ionic_lif_deinit(lif); 3033 err_qcqs_free: 3034 ionic_qcqs_free(lif); 3035 err_out: 3036 dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err); 3037 } 3038 3039 void ionic_lif_free(struct ionic_lif *lif) 3040 { 3041 struct device *dev = lif->ionic->dev; 3042 3043 ionic_lif_free_phc(lif); 3044 3045 /* free rss indirection table */ 3046 dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl, 3047 lif->rss_ind_tbl_pa); 3048 lif->rss_ind_tbl = NULL; 3049 lif->rss_ind_tbl_pa = 0; 3050 3051 /* free queues */ 3052 ionic_qcqs_free(lif); 3053 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 3054 ionic_lif_reset(lif); 3055 3056 /* free lif info */ 3057 kfree(lif->identity); 3058 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 3059 lif->info = NULL; 3060 lif->info_pa = 0; 3061 3062 /* unmap doorbell page */ 3063 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 3064 lif->kern_dbpage = NULL; 3065 kfree(lif->dbid_inuse); 3066 lif->dbid_inuse = NULL; 3067 3068 /* free netdev & lif */ 3069 ionic_debugfs_del_lif(lif); 3070 free_netdev(lif->netdev); 3071 } 3072 3073 void ionic_lif_deinit(struct ionic_lif *lif) 3074 { 3075 if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state)) 3076 return; 3077 3078 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { 3079 cancel_work_sync(&lif->deferred.work); 3080 cancel_work_sync(&lif->tx_timeout_work); 3081 ionic_rx_filters_deinit(lif); 3082 if (lif->netdev->features & NETIF_F_RXHASH) 3083 ionic_lif_rss_deinit(lif); 3084 } 3085 3086 napi_disable(&lif->adminqcq->napi); 3087 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 3088 ionic_lif_qcq_deinit(lif, lif->adminqcq); 3089 3090 mutex_destroy(&lif->config_lock); 3091 mutex_destroy(&lif->queue_lock); 3092 ionic_lif_reset(lif); 3093 } 3094 3095 static int ionic_lif_adminq_init(struct ionic_lif *lif) 3096 { 3097 struct device *dev = lif->ionic->dev; 3098 struct ionic_q_init_comp comp; 3099 struct ionic_dev *idev; 3100 struct ionic_qcq *qcq; 3101 struct ionic_queue *q; 3102 int err; 3103 3104 idev = &lif->ionic->idev; 3105 qcq = lif->adminqcq; 3106 q = &qcq->q; 3107 3108 mutex_lock(&lif->ionic->dev_cmd_lock); 3109 ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index); 3110 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3111 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 3112 mutex_unlock(&lif->ionic->dev_cmd_lock); 3113 if (err) { 3114 netdev_err(lif->netdev, "adminq init failed %d\n", err); 3115 return err; 3116 } 3117 3118 q->hw_type = comp.hw_type; 3119 q->hw_index = le32_to_cpu(comp.hw_index); 3120 q->dbval = IONIC_DBELL_QID(q->hw_index); 3121 3122 dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type); 3123 dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index); 3124 3125 netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi, 3126 NAPI_POLL_WEIGHT); 3127 3128 napi_enable(&qcq->napi); 3129 3130 if (qcq->flags & IONIC_QCQ_F_INTR) 3131 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 3132 IONIC_INTR_MASK_CLEAR); 3133 3134 qcq->flags |= IONIC_QCQ_F_INITED; 3135 3136 return 0; 3137 } 3138 3139 static int ionic_lif_notifyq_init(struct ionic_lif *lif) 3140 { 3141 struct ionic_qcq *qcq = lif->notifyqcq; 3142 struct device *dev = lif->ionic->dev; 3143 struct ionic_queue *q = &qcq->q; 3144 int err; 3145 3146 struct ionic_admin_ctx ctx = { 3147 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3148 .cmd.q_init = { 3149 .opcode = IONIC_CMD_Q_INIT, 
3150 .lif_index = cpu_to_le16(lif->index), 3151 .type = q->type, 3152 .ver = lif->qtype_info[q->type].version, 3153 .index = cpu_to_le32(q->index), 3154 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 3155 IONIC_QINIT_F_ENA), 3156 .intr_index = cpu_to_le16(lif->adminqcq->intr.index), 3157 .pid = cpu_to_le16(q->pid), 3158 .ring_size = ilog2(q->num_descs), 3159 .ring_base = cpu_to_le64(q->base_pa), 3160 } 3161 }; 3162 3163 dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid); 3164 dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index); 3165 dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 3166 dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 3167 3168 err = ionic_adminq_post_wait(lif, &ctx); 3169 if (err) 3170 return err; 3171 3172 lif->last_eid = 0; 3173 q->hw_type = ctx.comp.q_init.hw_type; 3174 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 3175 q->dbval = IONIC_DBELL_QID(q->hw_index); 3176 3177 dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type); 3178 dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index); 3179 3180 /* preset the callback info */ 3181 q->info[0].cb_arg = lif; 3182 3183 qcq->flags |= IONIC_QCQ_F_INITED; 3184 3185 return 0; 3186 } 3187 3188 static int ionic_station_set(struct ionic_lif *lif) 3189 { 3190 struct net_device *netdev = lif->netdev; 3191 struct ionic_admin_ctx ctx = { 3192 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3193 .cmd.lif_getattr = { 3194 .opcode = IONIC_CMD_LIF_GETATTR, 3195 .index = cpu_to_le16(lif->index), 3196 .attr = IONIC_LIF_ATTR_MAC, 3197 }, 3198 }; 3199 struct sockaddr addr; 3200 int err; 3201 3202 err = ionic_adminq_post_wait(lif, &ctx); 3203 if (err) 3204 return err; 3205 netdev_dbg(lif->netdev, "found initial MAC addr %pM\n", 3206 ctx.comp.lif_getattr.mac); 3207 if (is_zero_ether_addr(ctx.comp.lif_getattr.mac)) 3208 return 0; 3209 3210 if (!is_zero_ether_addr(netdev->dev_addr)) { 3211 /* If the netdev mac is non-zero and doesn't match the default 3212 * device address, it was set by something earlier and we're 3213 * likely here again after a fw-upgrade reset. We need to be 3214 * sure the netdev mac is in our filter list. 
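		 * If instead the netdev mac is still all zeroes, we take the
		 * device's mac address as ours below.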
3215 */ 3216 if (!ether_addr_equal(ctx.comp.lif_getattr.mac, 3217 netdev->dev_addr)) 3218 ionic_lif_addr_add(lif, netdev->dev_addr); 3219 } else { 3220 /* Update the netdev mac with the device's mac */ 3221 memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len); 3222 addr.sa_family = AF_INET; 3223 err = eth_prepare_mac_addr_change(netdev, &addr); 3224 if (err) { 3225 netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n", 3226 addr.sa_data, err); 3227 return 0; 3228 } 3229 3230 eth_commit_mac_addr_change(netdev, &addr); 3231 } 3232 3233 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", 3234 netdev->dev_addr); 3235 ionic_lif_addr_add(lif, netdev->dev_addr); 3236 3237 return 0; 3238 } 3239 3240 int ionic_lif_init(struct ionic_lif *lif) 3241 { 3242 struct ionic_dev *idev = &lif->ionic->idev; 3243 struct device *dev = lif->ionic->dev; 3244 struct ionic_lif_init_comp comp; 3245 int dbpage_num; 3246 int err; 3247 3248 mutex_lock(&lif->ionic->dev_cmd_lock); 3249 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa); 3250 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 3251 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 3252 mutex_unlock(&lif->ionic->dev_cmd_lock); 3253 if (err) 3254 return err; 3255 3256 lif->hw_index = le16_to_cpu(comp.hw_index); 3257 mutex_init(&lif->queue_lock); 3258 mutex_init(&lif->config_lock); 3259 3260 /* now that we have the hw_index we can figure out our doorbell page */ 3261 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); 3262 if (!lif->dbid_count) { 3263 dev_err(dev, "No doorbell pages, aborting\n"); 3264 return -EINVAL; 3265 } 3266 3267 lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL); 3268 if (!lif->dbid_inuse) { 3269 dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n"); 3270 return -ENOMEM; 3271 } 3272 3273 /* first doorbell id reserved for kernel (dbid aka pid == zero) */ 3274 set_bit(0, lif->dbid_inuse); 3275 lif->kern_pid = 0; 3276 3277 dbpage_num = ionic_db_page_num(lif, lif->kern_pid); 3278 lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num); 3279 if (!lif->kern_dbpage) { 3280 dev_err(dev, "Cannot map dbpage, aborting\n"); 3281 err = -ENOMEM; 3282 goto err_out_free_dbid; 3283 } 3284 3285 err = ionic_lif_adminq_init(lif); 3286 if (err) 3287 goto err_out_adminq_deinit; 3288 3289 if (lif->ionic->nnqs_per_lif) { 3290 err = ionic_lif_notifyq_init(lif); 3291 if (err) 3292 goto err_out_notifyq_deinit; 3293 } 3294 3295 err = ionic_init_nic_features(lif); 3296 if (err) 3297 goto err_out_notifyq_deinit; 3298 3299 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { 3300 err = ionic_rx_filters_init(lif); 3301 if (err) 3302 goto err_out_notifyq_deinit; 3303 } 3304 3305 err = ionic_station_set(lif); 3306 if (err) 3307 goto err_out_notifyq_deinit; 3308 3309 lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT; 3310 3311 set_bit(IONIC_LIF_F_INITED, lif->state); 3312 3313 INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work); 3314 3315 return 0; 3316 3317 err_out_notifyq_deinit: 3318 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 3319 err_out_adminq_deinit: 3320 ionic_lif_qcq_deinit(lif, lif->adminqcq); 3321 ionic_lif_reset(lif); 3322 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 3323 lif->kern_dbpage = NULL; 3324 err_out_free_dbid: 3325 kfree(lif->dbid_inuse); 3326 lif->dbid_inuse = NULL; 3327 3328 return err; 3329 } 3330 3331 static void ionic_lif_notify_work(struct work_struct *ws) 3332 { 3333 } 3334 3335 static void ionic_lif_set_netdev_info(struct ionic_lif *lif) 3336 
{ 3337 struct ionic_admin_ctx ctx = { 3338 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 3339 .cmd.lif_setattr = { 3340 .opcode = IONIC_CMD_LIF_SETATTR, 3341 .index = cpu_to_le16(lif->index), 3342 .attr = IONIC_LIF_ATTR_NAME, 3343 }, 3344 }; 3345 3346 strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name, 3347 sizeof(ctx.cmd.lif_setattr.name)); 3348 3349 ionic_adminq_post_wait(lif, &ctx); 3350 } 3351 3352 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev) 3353 { 3354 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit) 3355 return NULL; 3356 3357 return netdev_priv(netdev); 3358 } 3359 3360 static int ionic_lif_notify(struct notifier_block *nb, 3361 unsigned long event, void *info) 3362 { 3363 struct net_device *ndev = netdev_notifier_info_to_dev(info); 3364 struct ionic *ionic = container_of(nb, struct ionic, nb); 3365 struct ionic_lif *lif = ionic_netdev_lif(ndev); 3366 3367 if (!lif || lif->ionic != ionic) 3368 return NOTIFY_DONE; 3369 3370 switch (event) { 3371 case NETDEV_CHANGENAME: 3372 ionic_lif_set_netdev_info(lif); 3373 break; 3374 } 3375 3376 return NOTIFY_DONE; 3377 } 3378 3379 int ionic_lif_register(struct ionic_lif *lif) 3380 { 3381 int err; 3382 3383 ionic_lif_register_phc(lif); 3384 3385 INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work); 3386 3387 lif->ionic->nb.notifier_call = ionic_lif_notify; 3388 3389 err = register_netdevice_notifier(&lif->ionic->nb); 3390 if (err) 3391 lif->ionic->nb.notifier_call = NULL; 3392 3393 /* only register LIF0 for now */ 3394 err = register_netdev(lif->netdev); 3395 if (err) { 3396 dev_err(lif->ionic->dev, "Cannot register net device, aborting\n"); 3397 ionic_lif_unregister_phc(lif); 3398 return err; 3399 } 3400 3401 ionic_link_status_check_request(lif, CAN_SLEEP); 3402 lif->registered = true; 3403 ionic_lif_set_netdev_info(lif); 3404 3405 return 0; 3406 } 3407 3408 void ionic_lif_unregister(struct ionic_lif *lif) 3409 { 3410 if (lif->ionic->nb.notifier_call) { 3411 unregister_netdevice_notifier(&lif->ionic->nb); 3412 cancel_work_sync(&lif->ionic->nb_work); 3413 lif->ionic->nb.notifier_call = NULL; 3414 } 3415 3416 if (lif->netdev->reg_state == NETREG_REGISTERED) 3417 unregister_netdev(lif->netdev); 3418 3419 ionic_lif_unregister_phc(lif); 3420 3421 lif->registered = false; 3422 } 3423 3424 static void ionic_lif_queue_identify(struct ionic_lif *lif) 3425 { 3426 union ionic_q_identity __iomem *q_ident; 3427 struct ionic *ionic = lif->ionic; 3428 struct ionic_dev *idev; 3429 int qtype; 3430 int err; 3431 3432 idev = &lif->ionic->idev; 3433 q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data; 3434 3435 for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) { 3436 struct ionic_qtype_info *qti = &lif->qtype_info[qtype]; 3437 3438 /* filter out the ones we know about */ 3439 switch (qtype) { 3440 case IONIC_QTYPE_ADMINQ: 3441 case IONIC_QTYPE_NOTIFYQ: 3442 case IONIC_QTYPE_RXQ: 3443 case IONIC_QTYPE_TXQ: 3444 break; 3445 default: 3446 continue; 3447 } 3448 3449 memset(qti, 0, sizeof(*qti)); 3450 3451 mutex_lock(&ionic->dev_cmd_lock); 3452 ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype, 3453 ionic_qtype_versions[qtype]); 3454 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 3455 if (!err) { 3456 qti->version = readb(&q_ident->version); 3457 qti->supported = readb(&q_ident->supported); 3458 qti->features = readq(&q_ident->features); 3459 qti->desc_sz = readw(&q_ident->desc_sz); 3460 qti->comp_sz = readw(&q_ident->comp_sz); 3461 qti->sg_desc_sz = 
readw(&q_ident->sg_desc_sz); 3462 qti->max_sg_elems = readw(&q_ident->max_sg_elems); 3463 qti->sg_desc_stride = readw(&q_ident->sg_desc_stride); 3464 } 3465 mutex_unlock(&ionic->dev_cmd_lock); 3466 3467 if (err == -EINVAL) { 3468 dev_err(ionic->dev, "qtype %d not supported\n", qtype); 3469 continue; 3470 } else if (err == -EIO) { 3471 dev_err(ionic->dev, "q_ident failed, not supported on older FW\n"); 3472 return; 3473 } else if (err) { 3474 dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n", 3475 qtype, err); 3476 return; 3477 } 3478 3479 dev_dbg(ionic->dev, " qtype[%d].version = %d\n", 3480 qtype, qti->version); 3481 dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n", 3482 qtype, qti->supported); 3483 dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n", 3484 qtype, qti->features); 3485 dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n", 3486 qtype, qti->desc_sz); 3487 dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n", 3488 qtype, qti->comp_sz); 3489 dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n", 3490 qtype, qti->sg_desc_sz); 3491 dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n", 3492 qtype, qti->max_sg_elems); 3493 dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n", 3494 qtype, qti->sg_desc_stride); 3495 } 3496 } 3497 3498 int ionic_lif_identify(struct ionic *ionic, u8 lif_type, 3499 union ionic_lif_identity *lid) 3500 { 3501 struct ionic_dev *idev = &ionic->idev; 3502 size_t sz; 3503 int err; 3504 3505 sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data)); 3506 3507 mutex_lock(&ionic->dev_cmd_lock); 3508 ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1); 3509 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 3510 memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz); 3511 mutex_unlock(&ionic->dev_cmd_lock); 3512 if (err) 3513 return err; 3514 3515 dev_dbg(ionic->dev, "capabilities 0x%llx\n", 3516 le64_to_cpu(lid->capabilities)); 3517 3518 dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n", 3519 le32_to_cpu(lid->eth.max_ucast_filters)); 3520 dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n", 3521 le32_to_cpu(lid->eth.max_mcast_filters)); 3522 dev_dbg(ionic->dev, "eth.features 0x%llx\n", 3523 le64_to_cpu(lid->eth.config.features)); 3524 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n", 3525 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ])); 3526 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n", 3527 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ])); 3528 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n", 3529 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ])); 3530 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n", 3531 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ])); 3532 dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name); 3533 dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac); 3534 dev_dbg(ionic->dev, "eth.config.mtu %d\n", 3535 le32_to_cpu(lid->eth.config.mtu)); 3536 3537 return 0; 3538 } 3539 3540 int ionic_lif_size(struct ionic *ionic) 3541 { 3542 struct ionic_identity *ident = &ionic->ident; 3543 unsigned int nintrs, dev_nintrs; 3544 union ionic_lif_config *lc; 3545 unsigned int ntxqs_per_lif; 3546 unsigned int nrxqs_per_lif; 3547 unsigned int neqs_per_lif; 3548 unsigned int nnqs_per_lif; 3549 unsigned int nxqs, neqs; 3550 unsigned int min_intrs; 3551 int err; 3552 3553 /* retrieve basic values from FW */ 3554 lc = &ident->lif.eth.config; 3555 dev_nintrs = le32_to_cpu(ident->dev.nintrs); 3556 neqs_per_lif =
le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count); 3557 nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]); 3558 ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]); 3559 nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]); 3560 3561 /* limit values to play nice with kdump */ 3562 if (is_kdump_kernel()) { 3563 dev_nintrs = 2; 3564 neqs_per_lif = 0; 3565 nnqs_per_lif = 0; 3566 ntxqs_per_lif = 1; 3567 nrxqs_per_lif = 1; 3568 } 3569 3570 /* reserve last queue id for hardware timestamping */ 3571 if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) { 3572 if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) { 3573 lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP); 3574 } else { 3575 ntxqs_per_lif -= 1; 3576 nrxqs_per_lif -= 1; 3577 } 3578 } 3579 3580 nxqs = min(ntxqs_per_lif, nrxqs_per_lif); 3581 nxqs = min(nxqs, num_online_cpus()); 3582 neqs = min(neqs_per_lif, num_online_cpus()); 3583 3584 try_again: 3585 /* interrupt usage: 3586 * 1 for master lif adminq/notifyq 3587 * 1 for each CPU for master lif TxRx queue pairs 3588 * whatever's left is for RDMA queues 3589 */ 3590 nintrs = 1 + nxqs + neqs; 3591 min_intrs = 2; /* adminq + 1 TxRx queue pair */ 3592 3593 if (nintrs > dev_nintrs) 3594 goto try_fewer; 3595 3596 err = ionic_bus_alloc_irq_vectors(ionic, nintrs); 3597 if (err < 0 && err != -ENOSPC) { 3598 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err); 3599 return err; 3600 } 3601 if (err == -ENOSPC) 3602 goto try_fewer; 3603 3604 if (err != nintrs) { 3605 ionic_bus_free_irq_vectors(ionic); 3606 goto try_fewer; 3607 } 3608 3609 ionic->nnqs_per_lif = nnqs_per_lif; 3610 ionic->neqs_per_lif = neqs; 3611 ionic->ntxqs_per_lif = nxqs; 3612 ionic->nrxqs_per_lif = nxqs; 3613 ionic->nintrs = nintrs; 3614 3615 ionic_debugfs_add_sizes(ionic); 3616 3617 return 0; 3618 3619 try_fewer: 3620 if (nnqs_per_lif > 1) { 3621 nnqs_per_lif >>= 1; 3622 goto try_again; 3623 } 3624 if (neqs > 1) { 3625 neqs >>= 1; 3626 goto try_again; 3627 } 3628 if (nxqs > 1) { 3629 nxqs >>= 1; 3630 goto try_again; 3631 } 3632 dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs); 3633 return -ENOSPC; 3634 } 3635
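/*
 * A worked example of the interrupt sizing above, with hypothetical
 * numbers: on a 16-CPU host with ntxqs_per_lif = nrxqs_per_lif = 64
 * and no RDMA EQs (neqs_per_lif = 0), nxqs = min(64, 64, 16) = 16 and
 * we request nintrs = 1 + 16 + 0 = 17 vectors.  If the device or the
 * OS can't supply that many, try_fewer halves nnqs_per_lif, then neqs,
 * then nxqs, and retries until the request fits or nothing is left to
 * shrink, at which point we give up with min_intrs = 2 still unmet.
 */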