// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ethtool.h>
#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

/* queuetype support level */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 0,   /* 0 = Base version with CQ+SG support */
	[IONIC_QTYPE_TXQ]     = 1,   /* 0 = Base version with CQ+SG support
				      * 1 =   ... with Tx SG version 1
				      */
};

static void ionic_lif_rx_mode(struct ionic_lif *lif);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
static void ionic_link_status_check(struct ionic_lif *lif);
static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);

static void ionic_txrx_deinit(struct ionic_lif *lif);
static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);
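
/* Dynamic interrupt moderation (DIM) worker: runs when the net_dim
 * library decides the rx moderation profile should change.  It converts
 * the profile's usec value into the device's coalescing units and, if
 * that differs from what is currently programmed, writes the new value
 * to the queue's interrupt coalesce register.
 */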
static void ionic_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder cur_moder;
	struct ionic_qcq *qcq;
	u32 new_coal;

	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	qcq = container_of(dim, struct ionic_qcq, dim);
	new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
	new_coal = new_coal ? new_coal : 1;

	if (qcq->intr.dim_coal_hw != new_coal) {
		unsigned int qi = qcq->cq.bound_q->index;
		struct ionic_lif *lif = qcq->q.lif;

		qcq->intr.dim_coal_hw = new_coal;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[qi]->intr.index,
				     qcq->intr.dim_coal_hw);
	}

	dim->state = DIM_START_MEASURE;
}

static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	do {
		spin_lock_bh(&def->lock);
		if (!list_empty(&def->list)) {
			w = list_first_entry(&def->list,
					     struct ionic_deferred_work, list);
			list_del(&w->list);
		}
		spin_unlock_bh(&def->lock);

		if (!w)
			break;

		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif);
			break;
		case IONIC_DW_TYPE_RX_ADDR_ADD:
			ionic_lif_addr_add(lif, w->addr);
			break;
		case IONIC_DW_TYPE_RX_ADDR_DEL:
			ionic_lif_addr_del(lif, w->addr);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			if (w->fw_status)
				ionic_lif_handle_fw_up(lif);
			else
				ionic_lif_handle_fw_down(lif);
			break;
		default:
			break;
		}
		kfree(w);
		w = NULL;
	} while (true);
}

void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
				struct ionic_deferred_work *work)
{
	spin_lock_bh(&def->lock);
	list_add_tail(&work->list, &def->list);
	spin_unlock_bh(&def->lock);
	schedule_work(&def->work);
}
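
/* Link state handling: the check itself always runs in process context,
 * either called directly or via the deferred-work list above, so it can
 * take the queue_lock and start or stop the data path to match the port
 * state reported by firmware.
 */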
static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	/* Don't put carrier back up if we're in a broken state */
	if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
		return;
	}

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		int err = 0;

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			err = ionic_start_queues(lif);
			if (err && err != -EBUSY) {
				netdev_err(lif->netdev,
					   "Failed to start queues: %d\n", err);
				set_bit(IONIC_LIF_F_BROKEN, lif->state);
				netif_carrier_off(lif->netdev);
			}
			mutex_unlock(&lif->queue_lock);
		}

		if (!err && !netif_carrier_ok(netdev)) {
			ionic_port_identify(lif->ionic);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    le32_to_cpu(lif->info->status.link_speed) / 1000);
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}

void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
			return;
		}

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_link_status_check(lif);
	}
}

static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = lif->netdev->name;
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs) {
		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
			    __func__, index, ionic->nintrs);
		return -ENOSPC;
	}

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	return 0;
}

static void ionic_intr_free(struct ionic *ionic, int index)
{
	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
		clear_bit(index, ionic->intrs);
}
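
/* Queue enable/disable: IONIC_CMD_Q_CONTROL is posted on the adminq to
 * change the queue state in firmware.  On enable, NAPI and the interrupt
 * are armed before the command is sent; on disable the order is reversed
 * so that no new work arrives while the command completes.
 */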
static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		napi_enable(&qcq->napi);
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
{
	struct ionic_queue *q;
	struct ionic_lif *lif;
	int err = 0;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.oper = IONIC_Q_DISABLE,
		},
	};

	if (!qcq)
		return -ENXIO;

	q = &qcq->q;
	lif = q->lif;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		struct ionic_dev *idev = &lif->ionic->idev;

		cancel_work_sync(&qcq->dim.work);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
	}

	if (send_to_hw) {
		ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
		ctx.cmd.q_control.type = q->type;
		ctx.cmd.q_control.index = cpu_to_le32(q->index);
		dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
			ctx.cmd.q_control.index, ctx.cmd.q_control.type);

		err = ionic_adminq_post_wait(lif, &ctx);
	}

	return err;
}

static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	if (!qcq)
		return;

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
		return;

	irq_set_affinity_hint(qcq->intr.vector, NULL);
	devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
	qcq->intr.vector = 0;
	ionic_intr_free(lif->ionic, qcq->intr.index);
	qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}

static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (qcq->q_base) {
		dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
		qcq->q_base = NULL;
		qcq->q_base_pa = 0;
	}

	if (qcq->cq_base) {
		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
		qcq->cq_base = NULL;
		qcq->cq_base_pa = 0;
	}

	if (qcq->sg_base) {
		dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
		qcq->sg_base = NULL;
		qcq->sg_base_pa = 0;
	}

	ionic_qcq_intr_free(lif, qcq);

	if (qcq->cq.info) {
		devm_kfree(dev, qcq->cq.info);
		qcq->cq.info = NULL;
	}
	if (qcq->q.info) {
		devm_kfree(dev, qcq->q.info);
		qcq->q.info = NULL;
	}
}

static void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_qcq *adminqcq;
	unsigned long irqflags;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		devm_kfree(dev, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		spin_lock_irqsave(&lif->adminq_lock, irqflags);
		adminqcq = READ_ONCE(lif->adminqcq);
		lif->adminqcq = NULL;
		spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
		if (adminqcq) {
			ionic_qcq_free(lif, adminqcq);
			devm_kfree(dev, adminqcq);
		}
	}

	if (lif->rxqcqs) {
		devm_kfree(dev, lif->rxqstats);
		lif->rxqstats = NULL;
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		devm_kfree(dev, lif->txqstats);
		lif->txqstats = NULL;
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}

static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
		ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
	}

	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
}
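
/* Allocate an interrupt index and MSI-X vector for a qcq, assert its
 * mask, request the irq, and pick a preferred CPU near the device's
 * NUMA node for the affinity hint applied at enable time.
 */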
static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	int err;

	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	err = ionic_intr_alloc(lif, &qcq->intr);
	if (err) {
		netdev_warn(lif->netdev, "no intr for %s: %d\n",
			    qcq->q.name, err);
		goto err_out;
	}

	err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
	if (err < 0) {
		netdev_warn(lif->netdev, "no vector for %s: %d\n",
			    qcq->q.name, err);
		goto err_out_free_intr;
	}
	qcq->intr.vector = err;
	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
			       IONIC_INTR_MASK_SET);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "irq request failed %d\n", err);
		goto err_out_free_intr;
	}

	/* try to get the irq on the local numa node first */
	qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
					     dev_to_node(lif->ionic->dev));
	if (qcq->intr.cpu != -1)
		cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);

	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
	return 0;

err_out_free_intr:
	ionic_intr_free(lif->ionic, qcq->intr.index);
err_out:
	return err;
}
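
/* qcq allocation: each ring is allocated with a full PAGE_SIZE of slack
 * so that the base address handed to the device can be page-aligned with
 * PTR_ALIGN()/ALIGN() regardless of where dma_alloc_coherent() placed
 * the buffer.  For the notifyq the descriptor ring and completion ring
 * must be contiguous, so both come from a single allocation.
 */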
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int pid, struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	void *q_base, *cq_base, *sg_base;
	dma_addr_t cq_base_pa = 0;
	dma_addr_t sg_base_pa = 0;
	dma_addr_t q_base_pa = 0;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->q.dev = dev;
	new->flags = flags;

	new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
				   GFP_KERNEL);
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out_free_qcq;
	}

	new->q.type = type;
	new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out_free_q_info;
	}

	err = ionic_alloc_qcq_interrupt(lif, new);
	if (err)
		goto err_out;

	new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
				    GFP_KERNEL);
	if (!new->cq.info) {
		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_cq_info;
	}

	if (flags & IONIC_QCQ_F_NOTIFYQ) {
		int q_size, cq_size;

		/* q & cq need to be contiguous in case of notifyq */
		q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
		cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);

		new->q_size = PAGE_SIZE + q_size + cq_size;
		new->q_base = dma_alloc_coherent(dev, new->q_size,
						 &new->q_base_pa, GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
		cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	} else {
		new->q_size = PAGE_SIZE + (num_descs * desc_size);
		new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
						 GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
		new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
						  GFP_KERNEL);
		if (!new->cq_base) {
			netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_q;
		}
		cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
		cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	}

	if (flags & IONIC_QCQ_F_SG) {
		new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
		new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
						  GFP_KERNEL);
		if (!new->sg_base) {
			netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq;
		}
		sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
		sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	INIT_WORK(&new->dim.work, ionic_dim_work);
	new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;

	*qcq = new;

	return 0;

err_out_free_cq:
	dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
	dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
	devm_kfree(dev, new->cq.info);
err_out_free_irq:
	if (flags & IONIC_QCQ_F_INTR) {
		devm_free_irq(dev, new->intr.vector, &new->napi);
		ionic_intr_free(lif->ionic, new->intr.index);
	}
err_out_free_q_info:
	devm_kfree(dev, new->q.info);
err_out_free_qcq:
	devm_kfree(dev, new);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}
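
/* Allocate the adminq and (if supported) notifyq up front, plus the
 * arrays that will hold the per-queue tx/rx qcq pointers and stats;
 * the tx/rx queues themselves are allocated later in ionic_txrx_alloc().
 */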
static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int flags;
	int err;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0, lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0, lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	err = -ENOMEM;
	lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				   sizeof(*lif->txqcqs), GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out;
	lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				   sizeof(*lif->rxqcqs), GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out;

	lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
				     sizeof(*lif->txqstats), GFP_KERNEL);
	if (!lif->txqstats)
		goto err_out;
	lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
				     sizeof(*lif->rxqstats), GFP_KERNEL);
	if (!lif->rxqstats)
		goto err_out;

	return 0;

err_out:
	ionic_qcqs_free(lif);
	return err;
}

static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;
	memset(qcq->q_base, 0, qcq->q_size);
	memset(qcq->cq_base, 0, qcq->cq_size);
	memset(qcq->sg_base, 0, qcq->sg_size);
}
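
/* Queue init: IONIC_CMD_Q_INIT tells firmware where the descriptor,
 * completion, and scatter-gather rings live, and the completion returns
 * the hardware queue type and index used to build the doorbell value.
 */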
static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	unsigned int intr_index;
	int err;

	intr_index = qcq->intr.index;

	ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
			       NAPI_POLL_WEIGHT);
	else
		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int txq_i, flags;
	struct ionic_qcq *txq;
	u64 features;
	int err;

	mutex_lock(&lif->queue_lock);

	if (lif->hwstamp_txq)
		goto out;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = 2 * sizeof(struct ionic_txq_comp);

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	txq_i = lif->ionic->ntxqs_per_lif;
	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &txq);
	if (err)
		goto err_qcq_alloc;

	txq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, txq);
	ionic_debugfs_add_qcq(lif, txq);

	lif->hwstamp_txq = txq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_txq_init(lif, txq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			err = ionic_qcq_enable(txq);
			if (err)
				goto err_qcq_enable;
		}
	}

out:
	mutex_unlock(&lif->queue_lock);

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, txq);
err_qcq_init:
	lif->hwstamp_txq = NULL;
	ionic_debugfs_del_qcq(txq);
	ionic_qcq_free(lif, txq);
	devm_kfree(lif->ionic->dev, txq);
err_qcq_alloc:
	mutex_unlock(&lif->queue_lock);
	return err;
}

int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int rxq_i, flags;
	struct ionic_qcq *rxq;
	u64 features;
	int err;

	mutex_lock(&lif->queue_lock);

	if (lif->hwstamp_rxq)
		goto out;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_rxq_desc);
	comp_sz = 2 * sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	rxq_i = lif->ionic->nrxqs_per_lif;
	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &rxq);
	if (err)
		goto err_qcq_alloc;

	rxq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, rxq);
	ionic_debugfs_add_qcq(lif, rxq);

	lif->hwstamp_rxq = rxq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_rxq_init(lif, rxq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			ionic_rx_fill(&rxq->q);
			err = ionic_qcq_enable(rxq);
			if (err)
				goto err_qcq_enable;
		}
	}

out:
	mutex_unlock(&lif->queue_lock);

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, rxq);
err_qcq_init:
	lif->hwstamp_rxq = NULL;
	ionic_debugfs_del_qcq(rxq);
	ionic_qcq_free(lif, rxq);
	devm_kfree(lif->ionic->dev, rxq);
err_qcq_alloc:
	mutex_unlock(&lif->queue_lock);
	return err;
}

int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
{
	struct ionic_queue_params qparam;

	ionic_init_queue_params(lif, &qparam);

	if (rx_all)
		qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
	else
		qparam.rxq_features = 0;

	/* if we're not running, just set the values and return */
	if (!netif_running(lif->netdev)) {
		lif->rxq_features = qparam.rxq_features;
		return 0;
	}

	return ionic_reconfigure_queues(lif, &qparam);
}

int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_TXSTAMP,
			.txstamp_mode = cpu_to_le16(txstamp_mode),
		},
	};

	return ionic_adminq_post_wait(lif, &ctx);
}
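
/* PTP packet steering: an RXSTEER filter matches the requested packet
 * class and directs those packets to the hwstamp rx queue.  Changing
 * the filter is del-then-add, so a pkt_class of 0 simply leaves all
 * steering filters removed.
 */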
static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	u32 filter_id;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_rxsteer(lif);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return;
	}

	filter_id = f->filter_id;
	ionic_rx_filter_free(lif, f);

	spin_unlock_bh(&lif->rx_filters.lock);

	netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
}

static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
			.pkt_class = cpu_to_le64(pkt_class),
		},
	};
	u8 qtype;
	u32 qid;
	int err;

	if (!lif->hwstamp_rxq)
		return -EINVAL;

	qtype = lif->hwstamp_rxq->q.type;
	ctx.cmd.rx_filter_add.qtype = qtype;

	qid = lif->hwstamp_rxq->q.index;
	ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);

	netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	return ionic_rx_filter_save(lif, 0, qid, 0, &ctx);
}

int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	ionic_lif_del_hwstamp_rxfilt(lif);

	if (!pkt_class)
		return 0;

	return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
}
static bool ionic_notifyq_service(struct ionic_cq *cq,
				  struct ionic_cq_info *cq_info)
{
	union ionic_notifyq_comp *comp = cq_info->cq_desc;
	struct ionic_deferred_work *work;
	struct net_device *netdev;
	struct ionic_queue *q;
	struct ionic_lif *lif;
	u64 eid;

	q = cq->bound_q;
	lif = q->info[0].cb_arg;
	netdev = lif->netdev;
	eid = le64_to_cpu(comp->event.eid);

	/* Have we run out of new completions to process? */
	if ((s64)(eid - lif->last_eid) <= 0)
		return false;

	lif->last_eid = eid;

	dev_dbg(lif->ionic->dev, "notifyq event:\n");
	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	switch (le16_to_cpu(comp->event.ecode)) {
	case IONIC_EVENT_LINK_CHANGE:
		ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
		break;
	case IONIC_EVENT_RESET:
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "Reset event dropped\n");
		} else {
			work->type = IONIC_DW_TYPE_LIF_RESET;
			ionic_lif_deferred_enqueue(&lif->deferred, work);
		}
		break;
	default:
		netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
			    comp->event.ecode, eid);
		break;
	}

	return true;
}

static bool ionic_adminq_service(struct ionic_cq *cq,
				 struct ionic_cq_info *cq_info)
{
	struct ionic_admin_comp *comp = cq_info->cq_desc;

	if (!color_match(comp->color, cq->done_color))
		return false;

	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));

	return true;
}

static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	struct ionic_dev *idev = &lif->ionic->idev;
	unsigned long irqflags;
	unsigned int flags = 0;
	int rx_work = 0;
	int tx_work = 0;
	int n_work = 0;
	int a_work = 0;
	int work_done;
	int credits;

	if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
		n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
					  ionic_notifyq_service, NULL, NULL);

	spin_lock_irqsave(&lif->adminq_lock, irqflags);
	if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
		a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
					  ionic_adminq_service, NULL, NULL);
	spin_unlock_irqrestore(&lif->adminq_lock, irqflags);

	if (lif->hwstamp_rxq)
		rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
					   ionic_rx_service, NULL, NULL);

	if (lif->hwstamp_txq)
		tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
					   ionic_tx_service, NULL, NULL);

	work_done = max(max(n_work, a_work), max(rx_work, tx_work));
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		credits = n_work + a_work + rx_work + tx_work;
		ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
	}

	return work_done;
}

void ionic_get_stats64(struct net_device *netdev,
		       struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}

static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	/* don't bother if we already have it */
	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	spin_unlock_bh(&lif->rx_filters.lock);
	if (f)
		return 0;

	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);

	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
		   addr, f->filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	return 0;
}
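
/* Filter accounting: unicast and multicast filters are counted against
 * the limits firmware reported in the lif identity, so an add that
 * cannot succeed fails fast with -ENOSPC before hitting the adminq.
 */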
static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
{
	unsigned int nmfilters;
	unsigned int nufilters;

	if (add) {
		/* Do we have space for this filter?  We test the counters
		 * here before checking the need for deferral so that we
		 * can return an overflow error to the stack.
		 */
		nmfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
		nufilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);

		if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
			lif->nmcast++;
		else if (!is_multicast_ether_addr(addr) &&
			 lif->nucast < nufilters)
			lif->nucast++;
		else
			return -ENOSPC;
	} else {
		if (is_multicast_ether_addr(addr) && lif->nmcast)
			lif->nmcast--;
		else if (!is_multicast_ether_addr(addr) && lif->nucast)
			lif->nucast--;
	}

	netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
		   add ? "add" : "del", addr);
	if (add)
		return ionic_lif_addr_add(lif, addr);
	else
		return ionic_lif_addr_del(lif, addr);

	return 0;
}

static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR);
}

static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR);
}

static void ionic_lif_rx_mode(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	unsigned int nfilters;
	unsigned int nd_flags;
	char buf[128];
	u16 rx_mode;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	mutex_lock(&lif->config_lock);

	/* grab the flags once for local use */
	nd_flags = netdev->flags;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync unicast addresses
	 * next check to see if we're in an overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
	if (netdev_uc_count(netdev) + 1 > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		lif->uc_overflow = true;
	} else if (lif->uc_overflow) {
		lif->uc_overflow = false;
		if (!(nd_flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
	}

	/* same for multicast */
	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
	nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
	if (netdev_mc_count(netdev) > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
		lif->mc_overflow = true;
	} else if (lif->mc_overflow) {
		lif->mc_overflow = false;
		if (!(nd_flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
	netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);

	if (lif->rx_mode != rx_mode) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_mode_set = {
				.opcode = IONIC_CMD_RX_MODE_SET,
				.lif_index = cpu_to_le16(lif->index),
			},
		};
		int err;

		ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err)
			netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
				    rx_mode, err);
		else
			lif->rx_mode = rx_mode;
	}

	mutex_unlock(&lif->config_lock);
}
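
/* Rx-mode changes may be requested from atomic context (the ndo callback
 * runs with the netdev address list lock held); in that case the work is
 * pushed onto the deferred list so the adminq commands can sleep.
 */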
static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_deferred_work *work;

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "rxmode change dropped\n");
			return;
		}
		work->type = IONIC_DW_TYPE_RX_MODE;
		netdev_dbg(lif->netdev, "deferred: rx_mode\n");
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_lif_rx_mode(lif);
	}
}

static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
	ionic_set_rx_mode(netdev, CAN_NOT_SLEEP);
}

static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}
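
/* Feature negotiation: the requested netdev features are translated to
 * IONIC_ETH_HW_* bits and posted with LIF_SETATTR; the device answers
 * with the subset it actually enabled, which is what lif->hw_features
 * tracks from then on.
 */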
static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);

	if (lif->phc)
		ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & features) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
		dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");

	return 0;
}

static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_RXHASH |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_addr_del(netdev, netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_addr_add(netdev, mac);
}
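
/* Queue reconfiguration helpers: stop takes the queue_lock and tears the
 * queues down; start re-inits them and releases the lock.  Callers pair
 * them around any change that requires rebuilding the rings (MTU, queue
 * counts, timestamping, and the like).
 */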
static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
	/* Stop and clean the queues before reconfiguration */
	mutex_lock(&lif->queue_lock);
	netif_device_detach(lif->netdev);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
}

static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
	int err;

	/* Re-init the queues after reconfiguration */

	/* The only way txrx_init can fail here is if communication
	 * with FW is suddenly broken.  There's not much we can do
	 * at this point - error messages have already been printed,
	 * so we can continue on and the user can eventually do a
	 * DOWN and UP to try to reset and clear the issue.
	 */
	err = ionic_txrx_init(lif);
	mutex_unlock(&lif->queue_lock);
	ionic_link_status_check_request(lif, CAN_SLEEP);
	netif_device_attach(lif->netdev);

	return err;
}

static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* if we're not running, nothing more to do */
	if (!netif_running(netdev)) {
		netdev->mtu = new_mtu;
		return 0;
	}

	ionic_stop_queues_reconfig(lif);
	netdev->mtu = new_mtu;
	return ionic_start_queues_reconfig(lif);
}

static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	/* if we were stopped before this scheduled job was launched,
	 * don't bother the queues as they are already stopped.
	 */
	if (!netif_running(lif->netdev))
		return;

	ionic_stop_queues_reconfig(lif);
	ionic_start_queues_reconfig(lif);
}

static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
	schedule_work(&lif->tx_timeout_work);
}

static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
			.vlan.vlan = cpu_to_le16(vid),
		},
	};
	int err;

	netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_vlan(lif, vid);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
		   vid, f->filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	return ionic_adminq_post_wait(lif, &ctx);
}
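
/* RSS: the hash key and indirection table live in lif memory and are
 * pushed to firmware with IONIC_LIF_ATTR_RSS; rss.addr points at the
 * DMA-mapped indirection table.
 */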
int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_lif_rss_init(struct ionic_lif *lif)
{
	unsigned int tbl_sz;
	unsigned int i;

	lif->rss_types = IONIC_RSS_TYPE_IPV4 |
			 IONIC_RSS_TYPE_IPV4_TCP |
			 IONIC_RSS_TYPE_IPV4_UDP |
			 IONIC_RSS_TYPE_IPV6 |
			 IONIC_RSS_TYPE_IPV6_TCP |
			 IONIC_RSS_TYPE_IPV6_UDP;

	/* Fill indirection table with 'default' values */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
}

static void ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	int tbl_sz;

	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	memset(lif->rss_ind_tbl, 0, tbl_sz);
	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}

static void ionic_lif_quiesce(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_STATE,
			.state = IONIC_LIF_QUIESCE,
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		netdev_err(lif->netdev, "lif quiesce failed %d\n", err);
}

static void ionic_txrx_disable(struct ionic_lif *lif)
{
	unsigned int i;
	int err = 0;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
	}

	if (lif->hwstamp_txq)
		err = ionic_qcq_disable(lif->hwstamp_txq, (err != -ETIMEDOUT));

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
	}

	if (lif->hwstamp_rxq)
		err = ionic_qcq_disable(lif->hwstamp_rxq, (err != -ETIMEDOUT));

	ionic_lif_quiesce(lif);
}

static void ionic_txrx_deinit(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			ionic_tx_flush(&lif->txqcqs[i]->cq);
			ionic_tx_empty(&lif->txqcqs[i]->q);
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
			ionic_rx_empty(&lif->rxqcqs[i]->q);
		}
	}
	lif->rx_mode = 0;

	if (lif->hwstamp_txq) {
		ionic_lif_qcq_deinit(lif, lif->hwstamp_txq);
		ionic_tx_flush(&lif->hwstamp_txq->cq);
		ionic_tx_empty(&lif->hwstamp_txq->q);
	}

	if (lif->hwstamp_rxq) {
		ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq);
		ionic_rx_empty(&lif->hwstamp_rxq->q);
	}
}

static void ionic_txrx_free(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->txqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
			lif->txqcqs[i] = NULL;
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->rxqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
			lif->rxqcqs[i] = NULL;
		}
	}

	if (lif->hwstamp_txq) {
		ionic_qcq_free(lif, lif->hwstamp_txq);
		devm_kfree(lif->ionic->dev, lif->hwstamp_txq);
		lif->hwstamp_txq = NULL;
	}

	if (lif->hwstamp_rxq) {
		ionic_qcq_free(lif, lif->hwstamp_rxq);
		devm_kfree(lif->ionic->dev, lif->hwstamp_rxq);
		lif->hwstamp_rxq = NULL;
	}
}

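/* Allocate the Tx and Rx qcqs.  Tx queues use the larger v1 SG
 * descriptor when the FW advertises it, and Rx completion descriptors
 * are doubled in size when IONIC_Q_F_2X_CQ_DESC is set in
 * rxq_features.  With split interrupts each Tx queue gets its own
 * vector; otherwise Tx shares its partner Rx queue's interrupt.
 */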
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
	unsigned int flags, i;
	int err = 0;

	num_desc = lif->ntxq_descs;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = sizeof(struct ionic_txq_comp);

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		flags |= IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      num_desc, desc_sz, comp_sz, sg_desc_sz,
				      lif->kern_pid, &lif->txqcqs[i]);
		if (err)
			goto err_out;

		if (flags & IONIC_QCQ_F_INTR) {
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->txqcqs[i]->intr.index,
					     lif->tx_coalesce_hw);
			if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
				lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
		}

		ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
	}

	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;

	num_desc = lif->nrxq_descs;
	desc_sz = sizeof(struct ionic_rxq_desc);
	comp_sz = sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC)
		comp_sz *= 2;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
				      num_desc, desc_sz, comp_sz, sg_desc_sz,
				      lif->kern_pid, &lif->rxqcqs[i]);
		if (err)
			goto err_out;

		lif->rxqcqs[i]->q.features = lif->rxq_features;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[i]->intr.index,
				     lif->rx_coalesce_hw);
		if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
			lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;

		if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
			ionic_link_qcq_interrupts(lif->rxqcqs[i],
						  lif->txqcqs[i]);

		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
	}

	return 0;

err_out:
	ionic_txrx_free(lif);

	return err;
}

static int ionic_txrx_init(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
		if (err) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			goto err_out;
		}
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_init(lif);

	ionic_set_rx_mode(lif->netdev, CAN_SLEEP);

	return 0;

err_out:
	while (i--) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
	}

	return err;
}

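/* Bring the queues up in Rx-then-Tx order: each Rx ring is filled
 * before it is enabled so the hardware never starts on an empty ring,
 * and a Tx enable failure rolls back its partner Rx queue.  The unwind
 * paths disable whatever was already enabled, and stop posting device
 * commands once one of them has timed out.
 */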
static int ionic_txrx_enable(struct ionic_lif *lif)
{
	int derr = 0;
	int i, err;

	for (i = 0; i < lif->nxqs; i++) {
		if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
			dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
			err = -ENXIO;
			goto err_out;
		}

		ionic_rx_fill(&lif->rxqcqs[i]->q);
		err = ionic_qcq_enable(lif->rxqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_qcq_enable(lif->txqcqs[i]);
		if (err) {
			derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
			goto err_out;
		}
	}

	if (lif->hwstamp_rxq) {
		ionic_rx_fill(&lif->hwstamp_rxq->q);
		err = ionic_qcq_enable(lif->hwstamp_rxq);
		if (err)
			goto err_out_hwstamp_rx;
	}

	if (lif->hwstamp_txq) {
		err = ionic_qcq_enable(lif->hwstamp_txq);
		if (err)
			goto err_out_hwstamp_tx;
	}

	return 0;

err_out_hwstamp_tx:
	if (lif->hwstamp_rxq)
		derr = ionic_qcq_disable(lif->hwstamp_rxq, (derr != -ETIMEDOUT));
err_out_hwstamp_rx:
	i = lif->nxqs;
err_out:
	while (i--) {
		derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
		derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
	}

	return err;
}

static int ionic_start_queues(struct ionic_lif *lif)
{
	int err;

	if (test_bit(IONIC_LIF_F_BROKEN, lif->state))
		return -EIO;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return -EBUSY;

	if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
		return 0;

	err = ionic_txrx_enable(lif);
	if (err) {
		clear_bit(IONIC_LIF_F_UP, lif->state);
		return err;
	}
	netif_tx_wake_all_queues(lif->netdev);

	return 0;
}

static int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	/* If recovering from a broken state, clear the bit and we'll try again */
	if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
		netdev_info(netdev, "clearing broken state\n");

	err = ionic_txrx_alloc(lif);
	if (err)
		return err;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_txrx_free;

	err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	/* don't start the queues until we have link */
	if (netif_carrier_ok(netdev)) {
		err = ionic_start_queues(lif);
		if (err)
			goto err_txrx_deinit;
	}

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_txrx_free:
	ionic_txrx_free(lif);
	return err;
}

static void ionic_stop_queues(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
		return;

	netif_tx_disable(lif->netdev);
	ionic_txrx_disable(lif);
}

static int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return 0;

	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);

	return 0;
}

static int ionic_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return ionic_lif_hwstamp_set(lif, ifr);
	case SIOCGHWTSTAMP:
		return ionic_lif_hwstamp_get(lif, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

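/* SR-IOV ndo callbacks.  Each of these bails out early with -EBUSY if
 * the netdev is detached, then takes ionic->vf_op_lock (read side for
 * the get operations, write side for the sets) and range-checks the
 * VF index before touching the cached VF state or pushing the new
 * config to the device.
 */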
static int ionic_get_vf_config(struct net_device *netdev,
			       int vf, struct ifla_vf_info *ivf)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ivf->vf = vf;
		ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
		ivf->qos = 0;
		ivf->spoofchk = ionic->vfs[vf].spoofchk;
		ivf->linkstate = ionic->vfs[vf].linkstate;
		ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
		ivf->trusted = ionic->vfs[vf].trusted;
		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}

static int ionic_get_vf_stats(struct net_device *netdev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	struct ionic_lif_stats *vs;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		memset(vf_stats, 0, sizeof(*vf_stats));
		vs = &ionic->vfs[vf].stats;

		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
		vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes);
		vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes);
		vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets);
		vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets);
		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
				       le64_to_cpu(vs->rx_mcast_drop_packets) +
				       le64_to_cpu(vs->rx_bcast_drop_packets);
		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
				       le64_to_cpu(vs->tx_mcast_drop_packets) +
				       le64_to_cpu(vs->tx_bcast_drop_packets);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
		if (!ret)
			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
			     u8 qos, __be16 proto)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* until someday when we support qos */
	if (qos)
		return -EINVAL;

	if (vlan > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
		if (!ret)
			ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_rate(struct net_device *netdev, int vf,
			     int tx_min, int tx_max)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* setting the min just seems silly */
	if (tx_min)
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
		if (!ret)
			lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data = set;	/* convert to u8 for config */
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_SPOOFCHK, &data);
		if (!ret)
			ionic->vfs[vf].spoofchk = data;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data = set;	/* convert to u8 for config */
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_TRUST, &data);
		if (!ret)
			ionic->vfs[vf].trusted = data;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data;
	int ret;

	switch (set) {
	case IFLA_VF_LINK_STATE_ENABLE:
		data = IONIC_VF_LINK_STATUS_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		data = IONIC_VF_LINK_STATUS_DOWN;
		break;
	case IFLA_VF_LINK_STATE_AUTO:
		data = IONIC_VF_LINK_STATUS_AUTO;
		break;
	default:
		return -EINVAL;
	}

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_LINKSTATE, &data);
		if (!ret)
			ionic->vfs[vf].linkstate = set;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

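/* wired up to the netdev in ionic_lif_alloc() */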
static const struct net_device_ops ionic_netdev_ops = {
	.ndo_open = ionic_open,
	.ndo_stop = ionic_stop,
	.ndo_do_ioctl = ionic_do_ioctl,
	.ndo_start_xmit = ionic_start_xmit,
	.ndo_get_stats64 = ionic_get_stats64,
	.ndo_set_rx_mode = ionic_ndo_set_rx_mode,
	.ndo_set_features = ionic_set_features,
	.ndo_set_mac_address = ionic_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = ionic_tx_timeout,
	.ndo_change_mtu = ionic_change_mtu,
	.ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid,
	.ndo_set_vf_vlan = ionic_set_vf_vlan,
	.ndo_set_vf_trust = ionic_set_vf_trust,
	.ndo_set_vf_mac = ionic_set_vf_mac,
	.ndo_set_vf_rate = ionic_set_vf_rate,
	.ndo_set_vf_spoofchk = ionic_set_vf_spoofchk,
	.ndo_get_vf_config = ionic_get_vf_config,
	.ndo_set_vf_link_state = ionic_set_vf_link_state,
	.ndo_get_vf_stats = ionic_get_vf_stats,
};

static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
	/* only swapping the queues, not the napi, flags, or other stuff */
	swap(a->q.features, b->q.features);
	swap(a->q.num_descs, b->q.num_descs);
	swap(a->q.desc_size, b->q.desc_size);
	swap(a->q.base, b->q.base);
	swap(a->q.base_pa, b->q.base_pa);
	swap(a->q.info, b->q.info);
	swap(a->q_base, b->q_base);
	swap(a->q_base_pa, b->q_base_pa);
	swap(a->q_size, b->q_size);

	swap(a->q.sg_desc_size, b->q.sg_desc_size);
	swap(a->q.sg_base, b->q.sg_base);
	swap(a->q.sg_base_pa, b->q.sg_base_pa);
	swap(a->sg_base, b->sg_base);
	swap(a->sg_base_pa, b->sg_base_pa);
	swap(a->sg_size, b->sg_size);

	swap(a->cq.num_descs, b->cq.num_descs);
	swap(a->cq.desc_size, b->cq.desc_size);
	swap(a->cq.base, b->cq.base);
	swap(a->cq.base_pa, b->cq.base_pa);
	swap(a->cq.info, b->cq.info);
	swap(a->cq_base, b->cq_base);
	swap(a->cq_base_pa, b->cq_base_pa);
	swap(a->cq_size, b->cq_size);

	ionic_debugfs_del_qcq(a);
	ionic_debugfs_add_qcq(a->q.lif, a);
}

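/* Queue reconfiguration strategy: build a complete set of replacement
 * rings while the old ones are still running, take the queues down
 * only long enough to swap ring contents via ionic_swap_queues(), then
 * restart.  If anything fails before the swap, the original rings are
 * untouched and simply restarted, and the error is returned.
 */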
int ionic_reconfigure_queues(struct ionic_lif *lif,
			     struct ionic_queue_params *qparam)
{
	unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
	struct ionic_qcq **tx_qcqs = NULL;
	struct ionic_qcq **rx_qcqs = NULL;
	unsigned int flags, i;
	int err = -ENOMEM;

	/* allocate temporary qcq arrays to hold new queue structs */
	if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
		tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!tx_qcqs)
			goto err_out;
	}
	if (qparam->nxqs != lif->nxqs ||
	    qparam->nrxq_descs != lif->nrxq_descs ||
	    qparam->rxq_features != lif->rxq_features) {
		rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!rx_qcqs)
			goto err_out;
	}

	/* allocate new desc_info and rings, but leave the interrupt setup
	 * until later so as to not mess with the still-running queues
	 */
	if (tx_qcqs) {
		num_desc = qparam->ntxq_descs;
		desc_sz = sizeof(struct ionic_txq_desc);
		comp_sz = sizeof(struct ionic_txq_comp);

		if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
		    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
		    sizeof(struct ionic_txq_sg_desc_v1))
			sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
		else
			sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
					      num_desc, desc_sz, comp_sz, sg_desc_sz,
					      lif->kern_pid, &tx_qcqs[i]);
			if (err)
				goto err_out;
		}
	}

	if (rx_qcqs) {
		num_desc = qparam->nrxq_descs;
		desc_sz = sizeof(struct ionic_rxq_desc);
		comp_sz = sizeof(struct ionic_rxq_comp);
		sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

		if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC)
			comp_sz *= 2;

		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
					      num_desc, desc_sz, comp_sz, sg_desc_sz,
					      lif->kern_pid, &rx_qcqs[i]);
			if (err)
				goto err_out;

			rx_qcqs[i]->q.features = qparam->rxq_features;
		}
	}

	/* stop and clean the queues */
	ionic_stop_queues_reconfig(lif);

	if (qparam->nxqs != lif->nxqs) {
		err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
		if (err)
			goto err_out_reinit_unlock;
		err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
		if (err) {
			netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
			goto err_out_reinit_unlock;
		}
	}

	/* swap new desc_info and rings, keeping existing interrupt config */
	if (tx_qcqs) {
		lif->ntxq_descs = qparam->ntxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
	}

	if (rx_qcqs) {
		lif->nrxq_descs = qparam->nrxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
	}

	/* if we need to change the interrupt layout, this is the time */
	if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
	    qparam->nxqs != lif->nxqs) {
		if (qparam->intr_split) {
			set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
		} else {
			clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
			lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
			lif->tx_coalesce_hw = lif->rx_coalesce_hw;
		}

		/* clear existing interrupt assignments */
		for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
			ionic_qcq_intr_free(lif, lif->txqcqs[i]);
			ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
		}

		/* re-assign the interrupts */
		for (i = 0; i < qparam->nxqs; i++) {
			lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
			err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->rxqcqs[i]->intr.index,
					     lif->rx_coalesce_hw);

			if (qparam->intr_split) {
				lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
				err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
				ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
						     lif->txqcqs[i]->intr.index,
						     lif->tx_coalesce_hw);
				if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
					lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
			} else {
				lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
				ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
			}
		}
	}

	/* now we can rework the debugfs mappings */
	if (tx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->txqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
		}
	}

	if (rx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->rxqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
		}
	}

	swap(lif->nxqs, qparam->nxqs);
	swap(lif->rxq_features, qparam->rxq_features);

err_out_reinit_unlock:
	/* re-init the queues, but don't lose an error code */
	if (err)
		ionic_start_queues_reconfig(lif);
	else
		err = ionic_start_queues_reconfig(lif);

err_out:
	/* free old allocs without cleaning intr */
	for (i = 0; i < qparam->nxqs; i++) {
		if (tx_qcqs && tx_qcqs[i]) {
			tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, tx_qcqs[i]);
			devm_kfree(lif->ionic->dev, tx_qcqs[i]);
			tx_qcqs[i] = NULL;
		}
		if (rx_qcqs && rx_qcqs[i]) {
			rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, rx_qcqs[i]);
			devm_kfree(lif->ionic->dev, rx_qcqs[i]);
			rx_qcqs[i] = NULL;
		}
	}

	/* free q array */
	if (rx_qcqs) {
		devm_kfree(lif->ionic->dev, rx_qcqs);
		rx_qcqs = NULL;
	}
	if (tx_qcqs) {
		devm_kfree(lif->ionic->dev, tx_qcqs);
		tx_qcqs = NULL;
	}

	/* clean the unused dma and info allocations when new set is smaller
	 * than the full array, but leave the qcq shells in place
	 */
	for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
		lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
		ionic_qcq_free(lif, lif->txqcqs[i]);

		lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
		ionic_qcq_free(lif, lif->rxqcqs[i]);
	}

	return err;
}

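/* Allocate the LIF and its netdev.  Only a single LIF (index 0) is
 * created; the ionic_lif struct lives in the netdev private area, so
 * freeing the netdev also frees the lif.
 */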
int ionic_lif_alloc(struct ionic *ionic)
{
	struct device *dev = ionic->dev;
	union ionic_lif_identity *lid;
	struct net_device *netdev;
	struct ionic_lif *lif;
	int tbl_sz;
	int err;

	lid = kzalloc(sizeof(*lid), GFP_KERNEL);
	if (!lid)
		return -ENOMEM;

	netdev = alloc_etherdev_mqs(sizeof(*lif),
				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
	if (!netdev) {
		dev_err(dev, "Cannot allocate netdev, aborting\n");
		err = -ENOMEM;
		goto err_out_free_lid;
	}

	SET_NETDEV_DEV(netdev, dev);

	lif = netdev_priv(netdev);
	lif->netdev = netdev;
	ionic->lif = lif;
	netdev->netdev_ops = &ionic_netdev_ops;
	ionic_ethtool_set_ops(netdev);

	netdev->watchdog_timeo = 2 * HZ;
	netif_carrier_off(netdev);

	lif->identity = lid;
	lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
	err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
	if (err) {
		dev_err(ionic->dev, "Cannot identify type %d: %d\n",
			lif->lif_type, err);
		goto err_out_free_netdev;
	}
	lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
				     le32_to_cpu(lif->identity->eth.min_frame_size));
	lif->netdev->max_mtu =
		le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;

	lif->neqs = ionic->neqs_per_lif;
	lif->nxqs = ionic->ntxqs_per_lif;

	lif->ionic = ionic;
	lif->index = 0;
	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;

	/* Convert the default coalesce value to actual hw resolution */
	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
						    lif->rx_coalesce_usecs);
	lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
	lif->tx_coalesce_hw = lif->rx_coalesce_hw;
	set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
	set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);

	snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);

	spin_lock_init(&lif->adminq_lock);

	spin_lock_init(&lif->deferred.lock);
	INIT_LIST_HEAD(&lif->deferred.list);
	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);

	/* allocate lif info */
	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
	lif->info = dma_alloc_coherent(dev, lif->info_sz,
				       &lif->info_pa, GFP_KERNEL);
	if (!lif->info) {
		dev_err(dev, "Failed to allocate lif info, aborting\n");
		err = -ENOMEM;
		goto err_out_free_netdev;
	}

	ionic_debugfs_add_lif(lif);

	/* allocate control queues and txrx queue arrays */
	ionic_lif_queue_identify(lif);
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out_free_lif_info;

	/* allocate rss indirection table */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
					      &lif->rss_ind_tbl_pa,
					      GFP_KERNEL);

	if (!lif->rss_ind_tbl) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
		goto err_out_free_qcqs;
	}
	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_alloc_phc(lif);

	return 0;

err_out_free_qcqs:
	ionic_qcqs_free(lif);
err_out_free_lif_info:
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;
err_out_free_netdev:
	free_netdev(lif->netdev);
	lif = NULL;
err_out_free_lid:
	kfree(lid);

	return err;
}

static void ionic_lif_reset(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_reset(idev, lif->index);
	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
}

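/* Firmware recovery: ionic_lif_handle_fw_down() tears the LIF down to
 * nearly nothing when the FW stops responding, and
 * ionic_lif_handle_fw_up() rebuilds it once the FW comes back.  The
 * IONIC_LIF_F_FW_RESET bit keeps the pair from re-running or racing.
 */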
static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;

	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");

	netif_device_detach(lif->netdev);

	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
		mutex_lock(&lif->queue_lock);
		ionic_stop_queues(lif);
		mutex_unlock(&lif->queue_lock);
	}

	if (netif_running(lif->netdev)) {
		ionic_txrx_deinit(lif);
		ionic_txrx_free(lif);
	}
	ionic_lif_deinit(lif);
	ionic_reset(ionic);
	ionic_qcqs_free(lif);

	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}

static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	int err;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Up: restarting LIFs\n");

	ionic_init_devinfo(ionic);
	err = ionic_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_init(ionic);
	if (err)
		goto err_out;
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out;

	err = ionic_lif_init(lif);
	if (err)
		goto err_qcqs_free;

	if (lif->registered)
		ionic_lif_set_netdev_info(lif);

	ionic_rx_filter_replay(lif);

	if (netif_running(lif->netdev)) {
		err = ionic_txrx_alloc(lif);
		if (err)
			goto err_lifs_deinit;

		err = ionic_txrx_init(lif);
		if (err)
			goto err_txrx_free;
	}

	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
	ionic_link_status_check_request(lif, CAN_SLEEP);
	netif_device_attach(lif->netdev);
	dev_info(ionic->dev, "FW Up: LIFs restarted\n");

	/* restore the hardware timestamping queues */
	ionic_lif_hwstamp_replay(lif);

	return;

err_txrx_free:
	ionic_txrx_free(lif);
err_lifs_deinit:
	ionic_lif_deinit(lif);
err_qcqs_free:
	ionic_qcqs_free(lif);
err_out:
	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}

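/* Final teardown, the inverse of ionic_lif_alloc().  The LIF reset is
 * skipped while the FW is in reset, since the device can't be expected
 * to answer commands.
 */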
void ionic_lif_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	ionic_lif_free_phc(lif);

	/* free rss indirection table */
	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
			  lif->rss_ind_tbl_pa);
	lif->rss_ind_tbl = NULL;
	lif->rss_ind_tbl_pa = 0;

	/* free queues */
	ionic_qcqs_free(lif);
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		ionic_lif_reset(lif);

	/* free lif info */
	kfree(lif->identity);
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;

	/* unmap doorbell page */
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	/* free netdev & lif */
	ionic_debugfs_del_lif(lif);
	free_netdev(lif->netdev);
}

void ionic_lif_deinit(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
		return;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		cancel_work_sync(&lif->deferred.work);
		cancel_work_sync(&lif->tx_timeout_work);
		ionic_rx_filters_deinit(lif);
		if (lif->netdev->features & NETIF_F_RXHASH)
			ionic_lif_rss_deinit(lif);
	}

	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
	ionic_lif_qcq_deinit(lif, lif->adminqcq);

	mutex_destroy(&lif->config_lock);
	mutex_destroy(&lif->queue_lock);
	ionic_lif_reset(lif);
}

static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_q_init_comp comp;
	struct ionic_dev *idev;
	struct ionic_qcq *qcq;
	struct ionic_queue *q;
	int err;

	idev = &lif->ionic->idev;
	qcq = lif->adminqcq;
	q = &qcq->q;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err) {
		netdev_err(lif->netdev, "adminq init failed %d\n", err);
		return err;
	}

	q->hw_type = comp.hw_type;
	q->hw_index = le32_to_cpu(comp.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
		       NAPI_POLL_WEIGHT);

	napi_enable(&qcq->napi);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

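/* NotifyQ setup: the notifyq has no interrupt of its own; it is
 * initialized with the adminq's intr_index and its events are serviced
 * from the adminq NAPI context.
 */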
static int ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_qcq *qcq = lif->notifyqcq;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	int err;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_ENA),
			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
		}
	};

	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	lif->last_eid = 0;
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);

	/* preset the callback info */
	q->info[0].cb_arg = lif;

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

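/* Sort out the station MAC address at init time: ask the FW for its
 * idea of the MAC, adopt it if the netdev address is still unset, and
 * otherwise just make sure the address the netdev already has is in
 * the device's filter list.
 */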
static int ionic_station_set(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	struct sockaddr addr;
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;
	netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
		   ctx.comp.lif_getattr.mac);
	if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
		return 0;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		/* If the netdev mac is non-zero and doesn't match the default
		 * device address, it was set by something earlier and we're
		 * likely here again after a fw-upgrade reset.  We need to be
		 * sure the netdev mac is in our filter list.
		 */
		if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
				      netdev->dev_addr))
			ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);
	} else {
		/* Update the netdev mac with the device's mac */
		memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
		addr.sa_family = AF_INET;
		err = eth_prepare_mac_addr_change(netdev, &addr);
		if (err) {
			netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
				    addr.sa_data, err);
			return 0;
		}

		eth_commit_mac_addr_change(netdev, &addr);
	}

	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
		   netdev->dev_addr);
	ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR);

	return 0;
}

int ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_lif_init_comp comp;
	int dbpage_num;
	int err;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err)
		return err;

	lif->hw_index = le16_to_cpu(comp.hw_index);
	mutex_init(&lif->queue_lock);
	mutex_init(&lif->config_lock);

	/* now that we have the hw_index we can figure out our doorbell page */
	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
	if (!lif->dbid_count) {
		dev_err(dev, "No doorbell pages, aborting\n");
		return -EINVAL;
	}

	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
	if (!lif->dbid_inuse) {
		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
		return -ENOMEM;
	}

	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
	set_bit(0, lif->dbid_inuse);
	lif->kern_pid = 0;

	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
	if (!lif->kern_dbpage) {
		dev_err(dev, "Cannot map dbpage, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dbid;
	}

	err = ionic_lif_adminq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	if (lif->ionic->nnqs_per_lif) {
		err = ionic_lif_notifyq_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_init_nic_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		err = ionic_rx_filters_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_station_set(lif);
	if (err)
		goto err_out_notifyq_deinit;

	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;

	set_bit(IONIC_LIF_F_INITED, lif->state);

	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);

	return 0;

err_out_notifyq_deinit:
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
	ionic_lif_qcq_deinit(lif, lif->adminqcq);
	ionic_lif_reset(lif);
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
err_out_free_dbid:
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	return err;
}

static void ionic_lif_notify_work(struct work_struct *ws)
{
}

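/* Push the current netdev name to the FW so the device's view of the
 * interface matches what the kernel calls it; used at registration
 * time and again on NETDEV_CHANGENAME events.
 */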
static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
		sizeof(ctx.cmd.lif_setattr.name));

	ionic_adminq_post_wait(lif, &ctx);
}

static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
{
	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
		return NULL;

	return netdev_priv(netdev);
}

static int ionic_lif_notify(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(info);
	struct ionic *ionic = container_of(nb, struct ionic, nb);
	struct ionic_lif *lif = ionic_netdev_lif(ndev);

	if (!lif || lif->ionic != ionic)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGENAME:
		ionic_lif_set_netdev_info(lif);
		break;
	}

	return NOTIFY_DONE;
}

int ionic_lif_register(struct ionic_lif *lif)
{
	int err;

	ionic_lif_register_phc(lif);

	INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);

	lif->ionic->nb.notifier_call = ionic_lif_notify;

	err = register_netdevice_notifier(&lif->ionic->nb);
	if (err)
		lif->ionic->nb.notifier_call = NULL;

	/* only register LIF0 for now */
	err = register_netdev(lif->netdev);
	if (err) {
		dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
		ionic_lif_unregister_phc(lif);
		return err;
	}

	ionic_link_status_check_request(lif, CAN_SLEEP);
	lif->registered = true;
	ionic_lif_set_netdev_info(lif);

	return 0;
}

void ionic_lif_unregister(struct ionic_lif *lif)
{
	if (lif->ionic->nb.notifier_call) {
		unregister_netdevice_notifier(&lif->ionic->nb);
		cancel_work_sync(&lif->ionic->nb_work);
		lif->ionic->nb.notifier_call = NULL;
	}

	if (lif->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(lif->netdev);

	ionic_lif_unregister_phc(lif);

	lif->registered = false;
}

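/* Per-queue-type capability negotiation: for each qtype the driver
 * reports the highest version it supports (ionic_qtype_versions) and
 * records what the FW answers.  -EINVAL means the FW doesn't know the
 * qtype; -EIO means older FW without the q_identify command, in which
 * case everything is left at the zeroed version 0 defaults.
 */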
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
	union ionic_q_identity __iomem *q_ident;
	struct ionic *ionic = lif->ionic;
	struct ionic_dev *idev;
	int qtype;
	int err;

	idev = &lif->ionic->idev;
	q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;

	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* filter out the ones we know about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
					     ionic_qtype_versions[qtype]);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		if (!err) {
			qti->version = readb(&q_ident->version);
			qti->supported = readb(&q_ident->supported);
			qti->features = readq(&q_ident->features);
			qti->desc_sz = readw(&q_ident->desc_sz);
			qti->comp_sz = readw(&q_ident->comp_sz);
			qti->sg_desc_sz = readw(&q_ident->sg_desc_sz);
			qti->max_sg_elems = readw(&q_ident->max_sg_elems);
			qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
		}
		mutex_unlock(&ionic->dev_cmd_lock);

		if (err == -EINVAL) {
			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
			continue;
		} else if (err == -EIO) {
			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
			return;
		} else if (err) {
			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
				qtype, err);
			return;
		}

		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
			qtype, qti->version);
		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
			qtype, qti->supported);
		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
			qtype, qti->features);
		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
			qtype, qti->desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
			qtype, qti->comp_sz);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
			qtype, qti->sg_desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
			qtype, qti->max_sg_elems);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
			qtype, qti->sg_desc_stride);
	}
}

int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
		       union ionic_lif_identity *lid)
{
	struct ionic_dev *idev = &ionic->idev;
	size_t sz;
	int err;

	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err)
		return err;

	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
		le64_to_cpu(lid->capabilities));

	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
		le32_to_cpu(lid->eth.max_ucast_filters));
	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
		le32_to_cpu(lid->eth.max_mcast_filters));
	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
		le64_to_cpu(lid->eth.config.features));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
		le32_to_cpu(lid->eth.config.mtu));

	return 0;
}

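/* Figure out how many queues and interrupt vectors we can have: start
 * from the device limits and the online CPU count, then halve the
 * queue counts in turn (notifyq, then EQ, then TxRx pairs) until the
 * interrupt vector allocation succeeds or we're below the minimum of
 * two vectors (adminq plus one TxRx pair).
 */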
int ionic_lif_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	/* reserve last queue id for hardware timestamping */
	if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) {
		if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) {
			lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP);
		} else {
			ntxqs_per_lif -= 1;
			nrxqs_per_lif -= 1;
		}
	}

	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 *    1 for master lif adminq/notifyq
	 *    1 for each CPU for master lif TxRx queue pairs
	 *    whatever's left is for RDMA queues
	 */
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2;	/* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	if (err == -ENOSPC)
		goto try_fewer;

	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

try_fewer:
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}