1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ 3 4 #include <linux/ethtool.h> 5 #include <linux/printk.h> 6 #include <linux/dynamic_debug.h> 7 #include <linux/netdevice.h> 8 #include <linux/etherdevice.h> 9 #include <linux/if_vlan.h> 10 #include <linux/rtnetlink.h> 11 #include <linux/interrupt.h> 12 #include <linux/pci.h> 13 #include <linux/cpumask.h> 14 15 #include "ionic.h" 16 #include "ionic_bus.h" 17 #include "ionic_lif.h" 18 #include "ionic_txrx.h" 19 #include "ionic_ethtool.h" 20 #include "ionic_debugfs.h" 21 22 /* queuetype support level */ 23 static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = { 24 [IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */ 25 [IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */ 26 [IONIC_QTYPE_RXQ] = 0, /* 0 = Base version with CQ+SG support */ 27 [IONIC_QTYPE_TXQ] = 1, /* 0 = Base version with CQ+SG support 28 * 1 = ... with Tx SG version 1 29 */ 30 }; 31 32 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode); 33 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); 34 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); 35 static void ionic_link_status_check(struct ionic_lif *lif); 36 static void ionic_lif_handle_fw_down(struct ionic_lif *lif); 37 static void ionic_lif_handle_fw_up(struct ionic_lif *lif); 38 static void ionic_lif_set_netdev_info(struct ionic_lif *lif); 39 40 static void ionic_txrx_deinit(struct ionic_lif *lif); 41 static int ionic_txrx_init(struct ionic_lif *lif); 42 static int ionic_start_queues(struct ionic_lif *lif); 43 static void ionic_stop_queues(struct ionic_lif *lif); 44 static void ionic_lif_queue_identify(struct ionic_lif *lif); 45 46 static void ionic_dim_work(struct work_struct *work) 47 { 48 struct dim *dim = container_of(work, struct dim, work); 49 struct dim_cq_moder cur_moder; 50 struct ionic_qcq *qcq; 51 u32 new_coal; 52 53 cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 54 qcq = container_of(dim, struct ionic_qcq, dim); 55 new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec); 56 qcq->intr.dim_coal_hw = new_coal ? new_coal : 1; 57 dim->state = DIM_START_MEASURE; 58 } 59 60 static void ionic_lif_deferred_work(struct work_struct *work) 61 { 62 struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work); 63 struct ionic_deferred *def = &lif->deferred; 64 struct ionic_deferred_work *w = NULL; 65 66 do { 67 spin_lock_bh(&def->lock); 68 if (!list_empty(&def->list)) { 69 w = list_first_entry(&def->list, 70 struct ionic_deferred_work, list); 71 list_del(&w->list); 72 } 73 spin_unlock_bh(&def->lock); 74 75 if (!w) 76 break; 77 78 switch (w->type) { 79 case IONIC_DW_TYPE_RX_MODE: 80 ionic_lif_rx_mode(lif, w->rx_mode); 81 break; 82 case IONIC_DW_TYPE_RX_ADDR_ADD: 83 ionic_lif_addr_add(lif, w->addr); 84 break; 85 case IONIC_DW_TYPE_RX_ADDR_DEL: 86 ionic_lif_addr_del(lif, w->addr); 87 break; 88 case IONIC_DW_TYPE_LINK_STATUS: 89 ionic_link_status_check(lif); 90 break; 91 case IONIC_DW_TYPE_LIF_RESET: 92 if (w->fw_status) 93 ionic_lif_handle_fw_up(lif); 94 else 95 ionic_lif_handle_fw_down(lif); 96 break; 97 default: 98 break; 99 } 100 kfree(w); 101 w = NULL; 102 } while (true); 103 } 104 105 void ionic_lif_deferred_enqueue(struct ionic_deferred *def, 106 struct ionic_deferred_work *work) 107 { 108 spin_lock_bh(&def->lock); 109 list_add_tail(&work->list, &def->list); 110 spin_unlock_bh(&def->lock); 111 schedule_work(&def->work); 112 } 113 114 static void ionic_link_status_check(struct ionic_lif *lif) 115 { 116 struct net_device *netdev = lif->netdev; 117 u16 link_status; 118 bool link_up; 119 120 if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) 121 return; 122 123 link_status = le16_to_cpu(lif->info->status.link_status); 124 link_up = link_status == IONIC_PORT_OPER_STATUS_UP; 125 126 if (link_up) { 127 if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { 128 mutex_lock(&lif->queue_lock); 129 ionic_start_queues(lif); 130 mutex_unlock(&lif->queue_lock); 131 } 132 133 if (!netif_carrier_ok(netdev)) { 134 u32 link_speed; 135 136 ionic_port_identify(lif->ionic); 137 link_speed = le32_to_cpu(lif->info->status.link_speed); 138 netdev_info(netdev, "Link up - %d Gbps\n", 139 link_speed / 1000); 140 netif_carrier_on(netdev); 141 } 142 } else { 143 if (netif_carrier_ok(netdev)) { 144 netdev_info(netdev, "Link down\n"); 145 netif_carrier_off(netdev); 146 } 147 148 if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { 149 mutex_lock(&lif->queue_lock); 150 ionic_stop_queues(lif); 151 mutex_unlock(&lif->queue_lock); 152 } 153 } 154 155 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); 156 } 157 158 void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep) 159 { 160 struct ionic_deferred_work *work; 161 162 /* we only need one request outstanding at a time */ 163 if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) 164 return; 165 166 if (!can_sleep) { 167 work = kzalloc(sizeof(*work), GFP_ATOMIC); 168 if (!work) { 169 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); 170 return; 171 } 172 173 work->type = IONIC_DW_TYPE_LINK_STATUS; 174 ionic_lif_deferred_enqueue(&lif->deferred, work); 175 } else { 176 ionic_link_status_check(lif); 177 } 178 } 179 180 static irqreturn_t ionic_isr(int irq, void *data) 181 { 182 struct napi_struct *napi = data; 183 184 napi_schedule_irqoff(napi); 185 186 return IRQ_HANDLED; 187 } 188 189 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq) 190 { 191 struct ionic_intr_info *intr = &qcq->intr; 192 struct device *dev = lif->ionic->dev; 193 struct ionic_queue *q = &qcq->q; 194 const char *name; 195 196 if (lif->registered) 197 name = lif->netdev->name; 198 else 199 name = dev_name(dev); 200 201 snprintf(intr->name, sizeof(intr->name), 202 "%s-%s-%s", IONIC_DRV_NAME, name, q->name); 203 204 return devm_request_irq(dev, intr->vector, ionic_isr, 205 0, intr->name, &qcq->napi); 206 } 207 208 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr) 209 { 210 struct ionic *ionic = lif->ionic; 211 int index; 212 213 index = find_first_zero_bit(ionic->intrs, ionic->nintrs); 214 if (index == ionic->nintrs) { 215 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n", 216 __func__, index, ionic->nintrs); 217 return -ENOSPC; 218 } 219 220 set_bit(index, ionic->intrs); 221 ionic_intr_init(&ionic->idev, intr, index); 222 223 return 0; 224 } 225 226 static void ionic_intr_free(struct ionic *ionic, int index) 227 { 228 if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs) 229 clear_bit(index, ionic->intrs); 230 } 231 232 static int ionic_qcq_enable(struct ionic_qcq *qcq) 233 { 234 struct ionic_queue *q = &qcq->q; 235 struct ionic_lif *lif = q->lif; 236 struct ionic_dev *idev; 237 struct device *dev; 238 239 struct ionic_admin_ctx ctx = { 240 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 241 .cmd.q_control = { 242 .opcode = IONIC_CMD_Q_CONTROL, 243 .lif_index = cpu_to_le16(lif->index), 244 .type = q->type, 245 .index = cpu_to_le32(q->index), 246 .oper = IONIC_Q_ENABLE, 247 }, 248 }; 249 250 idev = &lif->ionic->idev; 251 dev = lif->ionic->dev; 252 253 dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n", 254 ctx.cmd.q_control.index, ctx.cmd.q_control.type); 255 256 if (qcq->flags & IONIC_QCQ_F_INTR) { 257 irq_set_affinity_hint(qcq->intr.vector, 258 &qcq->intr.affinity_mask); 259 napi_enable(&qcq->napi); 260 ionic_intr_clean(idev->intr_ctrl, qcq->intr.index); 261 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 262 IONIC_INTR_MASK_CLEAR); 263 } 264 265 return ionic_adminq_post_wait(lif, &ctx); 266 } 267 268 static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw) 269 { 270 struct ionic_queue *q; 271 struct ionic_lif *lif; 272 int err = 0; 273 274 struct ionic_admin_ctx ctx = { 275 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 276 .cmd.q_control = { 277 .opcode = IONIC_CMD_Q_CONTROL, 278 .oper = IONIC_Q_DISABLE, 279 }, 280 }; 281 282 if (!qcq) 283 return -ENXIO; 284 285 q = &qcq->q; 286 lif = q->lif; 287 288 if (qcq->flags & IONIC_QCQ_F_INTR) { 289 struct ionic_dev *idev = &lif->ionic->idev; 290 291 cancel_work_sync(&qcq->dim.work); 292 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 293 IONIC_INTR_MASK_SET); 294 synchronize_irq(qcq->intr.vector); 295 irq_set_affinity_hint(qcq->intr.vector, NULL); 296 napi_disable(&qcq->napi); 297 } 298 299 if (send_to_hw) { 300 ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index); 301 ctx.cmd.q_control.type = q->type; 302 ctx.cmd.q_control.index = cpu_to_le32(q->index); 303 dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n", 304 ctx.cmd.q_control.index, ctx.cmd.q_control.type); 305 306 err = ionic_adminq_post_wait(lif, &ctx); 307 } 308 309 return err; 310 } 311 312 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq) 313 { 314 struct ionic_dev *idev = &lif->ionic->idev; 315 316 if (!qcq) 317 return; 318 319 if (!(qcq->flags & IONIC_QCQ_F_INITED)) 320 return; 321 322 if (qcq->flags & IONIC_QCQ_F_INTR) { 323 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 324 IONIC_INTR_MASK_SET); 325 netif_napi_del(&qcq->napi); 326 } 327 328 qcq->flags &= ~IONIC_QCQ_F_INITED; 329 } 330 331 static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq) 332 { 333 if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0) 334 return; 335 336 irq_set_affinity_hint(qcq->intr.vector, NULL); 337 devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi); 338 qcq->intr.vector = 0; 339 ionic_intr_free(lif->ionic, qcq->intr.index); 340 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED; 341 } 342 343 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) 344 { 345 struct device *dev = lif->ionic->dev; 346 347 if (!qcq) 348 return; 349 350 ionic_debugfs_del_qcq(qcq); 351 352 if (qcq->q_base) { 353 dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa); 354 qcq->q_base = NULL; 355 qcq->q_base_pa = 0; 356 } 357 358 if (qcq->cq_base) { 359 dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa); 360 qcq->cq_base = NULL; 361 qcq->cq_base_pa = 0; 362 } 363 364 if (qcq->sg_base) { 365 dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa); 366 qcq->sg_base = NULL; 367 qcq->sg_base_pa = 0; 368 } 369 370 ionic_qcq_intr_free(lif, qcq); 371 372 if (qcq->cq.info) { 373 devm_kfree(dev, qcq->cq.info); 374 qcq->cq.info = NULL; 375 } 376 if (qcq->q.info) { 377 devm_kfree(dev, qcq->q.info); 378 qcq->q.info = NULL; 379 } 380 } 381 382 static void ionic_qcqs_free(struct ionic_lif *lif) 383 { 384 struct device *dev = lif->ionic->dev; 385 386 if (lif->notifyqcq) { 387 ionic_qcq_free(lif, lif->notifyqcq); 388 devm_kfree(dev, lif->notifyqcq); 389 lif->notifyqcq = NULL; 390 } 391 392 if (lif->adminqcq) { 393 ionic_qcq_free(lif, lif->adminqcq); 394 devm_kfree(dev, lif->adminqcq); 395 lif->adminqcq = NULL; 396 } 397 398 if (lif->rxqcqs) { 399 devm_kfree(dev, lif->rxqstats); 400 lif->rxqstats = NULL; 401 devm_kfree(dev, lif->rxqcqs); 402 lif->rxqcqs = NULL; 403 } 404 405 if (lif->txqcqs) { 406 devm_kfree(dev, lif->txqstats); 407 lif->txqstats = NULL; 408 devm_kfree(dev, lif->txqcqs); 409 lif->txqcqs = NULL; 410 } 411 } 412 413 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq, 414 struct ionic_qcq *n_qcq) 415 { 416 if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) { 417 ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index); 418 n_qcq->flags &= ~IONIC_QCQ_F_INTR; 419 } 420 421 n_qcq->intr.vector = src_qcq->intr.vector; 422 n_qcq->intr.index = src_qcq->intr.index; 423 } 424 425 static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq) 426 { 427 int err; 428 429 if (!(qcq->flags & IONIC_QCQ_F_INTR)) { 430 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED; 431 return 0; 432 } 433 434 err = ionic_intr_alloc(lif, &qcq->intr); 435 if (err) { 436 netdev_warn(lif->netdev, "no intr for %s: %d\n", 437 qcq->q.name, err); 438 goto err_out; 439 } 440 441 err = ionic_bus_get_irq(lif->ionic, qcq->intr.index); 442 if (err < 0) { 443 netdev_warn(lif->netdev, "no vector for %s: %d\n", 444 qcq->q.name, err); 445 goto err_out_free_intr; 446 } 447 qcq->intr.vector = err; 448 ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index, 449 IONIC_INTR_MASK_SET); 450 451 err = ionic_request_irq(lif, qcq); 452 if (err) { 453 netdev_warn(lif->netdev, "irq request failed %d\n", err); 454 goto err_out_free_intr; 455 } 456 457 /* try to get the irq on the local numa node first */ 458 qcq->intr.cpu = cpumask_local_spread(qcq->intr.index, 459 dev_to_node(lif->ionic->dev)); 460 if (qcq->intr.cpu != -1) 461 cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask); 462 463 netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index); 464 return 0; 465 466 err_out_free_intr: 467 ionic_intr_free(lif->ionic, qcq->intr.index); 468 err_out: 469 return err; 470 } 471 472 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, 473 unsigned int index, 474 const char *name, unsigned int flags, 475 unsigned int num_descs, unsigned int desc_size, 476 unsigned int cq_desc_size, 477 unsigned int sg_desc_size, 478 unsigned int pid, struct ionic_qcq **qcq) 479 { 480 struct ionic_dev *idev = &lif->ionic->idev; 481 struct device *dev = lif->ionic->dev; 482 void *q_base, *cq_base, *sg_base; 483 dma_addr_t cq_base_pa = 0; 484 dma_addr_t sg_base_pa = 0; 485 dma_addr_t q_base_pa = 0; 486 struct ionic_qcq *new; 487 int err; 488 489 *qcq = NULL; 490 491 new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL); 492 if (!new) { 493 netdev_err(lif->netdev, "Cannot allocate queue structure\n"); 494 err = -ENOMEM; 495 goto err_out; 496 } 497 498 new->q.dev = dev; 499 new->flags = flags; 500 501 new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info), 502 GFP_KERNEL); 503 if (!new->q.info) { 504 netdev_err(lif->netdev, "Cannot allocate queue info\n"); 505 err = -ENOMEM; 506 goto err_out_free_qcq; 507 } 508 509 new->q.type = type; 510 new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems; 511 512 err = ionic_q_init(lif, idev, &new->q, index, name, num_descs, 513 desc_size, sg_desc_size, pid); 514 if (err) { 515 netdev_err(lif->netdev, "Cannot initialize queue\n"); 516 goto err_out_free_q_info; 517 } 518 519 err = ionic_alloc_qcq_interrupt(lif, new); 520 if (err) 521 goto err_out; 522 523 new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info), 524 GFP_KERNEL); 525 if (!new->cq.info) { 526 netdev_err(lif->netdev, "Cannot allocate completion queue info\n"); 527 err = -ENOMEM; 528 goto err_out_free_irq; 529 } 530 531 err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size); 532 if (err) { 533 netdev_err(lif->netdev, "Cannot initialize completion queue\n"); 534 goto err_out_free_cq_info; 535 } 536 537 if (flags & IONIC_QCQ_F_NOTIFYQ) { 538 int q_size, cq_size; 539 540 /* q & cq need to be contiguous in case of notifyq */ 541 q_size = ALIGN(num_descs * desc_size, PAGE_SIZE); 542 cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE); 543 544 new->q_size = PAGE_SIZE + q_size + cq_size; 545 new->q_base = dma_alloc_coherent(dev, new->q_size, 546 &new->q_base_pa, GFP_KERNEL); 547 if (!new->q_base) { 548 netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n"); 549 err = -ENOMEM; 550 goto err_out_free_cq_info; 551 } 552 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE); 553 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); 554 ionic_q_map(&new->q, q_base, q_base_pa); 555 556 cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE); 557 cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE); 558 ionic_cq_map(&new->cq, cq_base, cq_base_pa); 559 ionic_cq_bind(&new->cq, &new->q); 560 } else { 561 new->q_size = PAGE_SIZE + (num_descs * desc_size); 562 new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa, 563 GFP_KERNEL); 564 if (!new->q_base) { 565 netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n"); 566 err = -ENOMEM; 567 goto err_out_free_cq_info; 568 } 569 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE); 570 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); 571 ionic_q_map(&new->q, q_base, q_base_pa); 572 573 new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size); 574 new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa, 575 GFP_KERNEL); 576 if (!new->cq_base) { 577 netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n"); 578 err = -ENOMEM; 579 goto err_out_free_q; 580 } 581 cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE); 582 cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE); 583 ionic_cq_map(&new->cq, cq_base, cq_base_pa); 584 ionic_cq_bind(&new->cq, &new->q); 585 } 586 587 if (flags & IONIC_QCQ_F_SG) { 588 new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size); 589 new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa, 590 GFP_KERNEL); 591 if (!new->sg_base) { 592 netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n"); 593 err = -ENOMEM; 594 goto err_out_free_cq; 595 } 596 sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE); 597 sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE); 598 ionic_q_sg_map(&new->q, sg_base, sg_base_pa); 599 } 600 601 INIT_WORK(&new->dim.work, ionic_dim_work); 602 new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 603 604 *qcq = new; 605 606 return 0; 607 608 err_out_free_cq: 609 dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa); 610 err_out_free_q: 611 dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa); 612 err_out_free_cq_info: 613 devm_kfree(dev, new->cq.info); 614 err_out_free_irq: 615 if (flags & IONIC_QCQ_F_INTR) { 616 devm_free_irq(dev, new->intr.vector, &new->napi); 617 ionic_intr_free(lif->ionic, new->intr.index); 618 } 619 err_out_free_q_info: 620 devm_kfree(dev, new->q.info); 621 err_out_free_qcq: 622 devm_kfree(dev, new); 623 err_out: 624 dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err); 625 return err; 626 } 627 628 static int ionic_qcqs_alloc(struct ionic_lif *lif) 629 { 630 struct device *dev = lif->ionic->dev; 631 unsigned int flags; 632 int err; 633 634 flags = IONIC_QCQ_F_INTR; 635 err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags, 636 IONIC_ADMINQ_LENGTH, 637 sizeof(struct ionic_admin_cmd), 638 sizeof(struct ionic_admin_comp), 639 0, lif->kern_pid, &lif->adminqcq); 640 if (err) 641 return err; 642 ionic_debugfs_add_qcq(lif, lif->adminqcq); 643 644 if (lif->ionic->nnqs_per_lif) { 645 flags = IONIC_QCQ_F_NOTIFYQ; 646 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq", 647 flags, IONIC_NOTIFYQ_LENGTH, 648 sizeof(struct ionic_notifyq_cmd), 649 sizeof(union ionic_notifyq_comp), 650 0, lif->kern_pid, &lif->notifyqcq); 651 if (err) 652 goto err_out; 653 ionic_debugfs_add_qcq(lif, lif->notifyqcq); 654 655 /* Let the notifyq ride on the adminq interrupt */ 656 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq); 657 } 658 659 err = -ENOMEM; 660 lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif, 661 sizeof(struct ionic_qcq *), GFP_KERNEL); 662 if (!lif->txqcqs) 663 goto err_out; 664 lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif, 665 sizeof(struct ionic_qcq *), GFP_KERNEL); 666 if (!lif->rxqcqs) 667 goto err_out; 668 669 lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif, 670 sizeof(struct ionic_tx_stats), GFP_KERNEL); 671 if (!lif->txqstats) 672 goto err_out; 673 lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif, 674 sizeof(struct ionic_rx_stats), GFP_KERNEL); 675 if (!lif->rxqstats) 676 goto err_out; 677 678 return 0; 679 680 err_out: 681 ionic_qcqs_free(lif); 682 return err; 683 } 684 685 static void ionic_qcq_sanitize(struct ionic_qcq *qcq) 686 { 687 qcq->q.tail_idx = 0; 688 qcq->q.head_idx = 0; 689 qcq->cq.tail_idx = 0; 690 qcq->cq.done_color = 1; 691 memset(qcq->q_base, 0, qcq->q_size); 692 memset(qcq->cq_base, 0, qcq->cq_size); 693 memset(qcq->sg_base, 0, qcq->sg_size); 694 } 695 696 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) 697 { 698 struct device *dev = lif->ionic->dev; 699 struct ionic_queue *q = &qcq->q; 700 struct ionic_cq *cq = &qcq->cq; 701 struct ionic_admin_ctx ctx = { 702 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 703 .cmd.q_init = { 704 .opcode = IONIC_CMD_Q_INIT, 705 .lif_index = cpu_to_le16(lif->index), 706 .type = q->type, 707 .ver = lif->qtype_info[q->type].version, 708 .index = cpu_to_le32(q->index), 709 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 710 IONIC_QINIT_F_SG), 711 .pid = cpu_to_le16(q->pid), 712 .ring_size = ilog2(q->num_descs), 713 .ring_base = cpu_to_le64(q->base_pa), 714 .cq_ring_base = cpu_to_le64(cq->base_pa), 715 .sg_ring_base = cpu_to_le64(q->sg_base_pa), 716 }, 717 }; 718 unsigned int intr_index; 719 int err; 720 721 if (qcq->flags & IONIC_QCQ_F_INTR) 722 intr_index = qcq->intr.index; 723 else 724 intr_index = lif->rxqcqs[q->index]->intr.index; 725 ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index); 726 727 dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid); 728 dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index); 729 dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 730 dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 731 dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags); 732 dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver); 733 dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index); 734 735 ionic_qcq_sanitize(qcq); 736 737 err = ionic_adminq_post_wait(lif, &ctx); 738 if (err) 739 return err; 740 741 q->hw_type = ctx.comp.q_init.hw_type; 742 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 743 q->dbval = IONIC_DBELL_QID(q->hw_index); 744 745 dev_dbg(dev, "txq->hw_type %d\n", q->hw_type); 746 dev_dbg(dev, "txq->hw_index %d\n", q->hw_index); 747 748 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 749 netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi, 750 NAPI_POLL_WEIGHT); 751 752 qcq->flags |= IONIC_QCQ_F_INITED; 753 754 return 0; 755 } 756 757 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) 758 { 759 struct device *dev = lif->ionic->dev; 760 struct ionic_queue *q = &qcq->q; 761 struct ionic_cq *cq = &qcq->cq; 762 struct ionic_admin_ctx ctx = { 763 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 764 .cmd.q_init = { 765 .opcode = IONIC_CMD_Q_INIT, 766 .lif_index = cpu_to_le16(lif->index), 767 .type = q->type, 768 .ver = lif->qtype_info[q->type].version, 769 .index = cpu_to_le32(q->index), 770 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 771 IONIC_QINIT_F_SG), 772 .intr_index = cpu_to_le16(cq->bound_intr->index), 773 .pid = cpu_to_le16(q->pid), 774 .ring_size = ilog2(q->num_descs), 775 .ring_base = cpu_to_le64(q->base_pa), 776 .cq_ring_base = cpu_to_le64(cq->base_pa), 777 .sg_ring_base = cpu_to_le64(q->sg_base_pa), 778 }, 779 }; 780 int err; 781 782 dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid); 783 dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index); 784 dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 785 dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 786 dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags); 787 dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver); 788 dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index); 789 790 ionic_qcq_sanitize(qcq); 791 792 err = ionic_adminq_post_wait(lif, &ctx); 793 if (err) 794 return err; 795 796 q->hw_type = ctx.comp.q_init.hw_type; 797 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 798 q->dbval = IONIC_DBELL_QID(q->hw_index); 799 800 dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type); 801 dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index); 802 803 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 804 netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi, 805 NAPI_POLL_WEIGHT); 806 else 807 netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi, 808 NAPI_POLL_WEIGHT); 809 810 qcq->flags |= IONIC_QCQ_F_INITED; 811 812 return 0; 813 } 814 815 static bool ionic_notifyq_service(struct ionic_cq *cq, 816 struct ionic_cq_info *cq_info) 817 { 818 union ionic_notifyq_comp *comp = cq_info->cq_desc; 819 struct ionic_deferred_work *work; 820 struct net_device *netdev; 821 struct ionic_queue *q; 822 struct ionic_lif *lif; 823 u64 eid; 824 825 q = cq->bound_q; 826 lif = q->info[0].cb_arg; 827 netdev = lif->netdev; 828 eid = le64_to_cpu(comp->event.eid); 829 830 /* Have we run out of new completions to process? */ 831 if ((s64)(eid - lif->last_eid) <= 0) 832 return false; 833 834 lif->last_eid = eid; 835 836 dev_dbg(lif->ionic->dev, "notifyq event:\n"); 837 dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1, 838 comp, sizeof(*comp), true); 839 840 switch (le16_to_cpu(comp->event.ecode)) { 841 case IONIC_EVENT_LINK_CHANGE: 842 ionic_link_status_check_request(lif, false); 843 break; 844 case IONIC_EVENT_RESET: 845 work = kzalloc(sizeof(*work), GFP_ATOMIC); 846 if (!work) { 847 netdev_err(lif->netdev, "Reset event dropped\n"); 848 } else { 849 work->type = IONIC_DW_TYPE_LIF_RESET; 850 ionic_lif_deferred_enqueue(&lif->deferred, work); 851 } 852 break; 853 default: 854 netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n", 855 comp->event.ecode, eid); 856 break; 857 } 858 859 return true; 860 } 861 862 static bool ionic_adminq_service(struct ionic_cq *cq, 863 struct ionic_cq_info *cq_info) 864 { 865 struct ionic_admin_comp *comp = cq_info->cq_desc; 866 867 if (!color_match(comp->color, cq->done_color)) 868 return false; 869 870 ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index)); 871 872 return true; 873 } 874 875 static int ionic_adminq_napi(struct napi_struct *napi, int budget) 876 { 877 struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr; 878 struct ionic_lif *lif = napi_to_cq(napi)->lif; 879 struct ionic_dev *idev = &lif->ionic->idev; 880 unsigned int flags = 0; 881 int n_work = 0; 882 int a_work = 0; 883 int work_done; 884 885 if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED) 886 n_work = ionic_cq_service(&lif->notifyqcq->cq, budget, 887 ionic_notifyq_service, NULL, NULL); 888 889 if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED) 890 a_work = ionic_cq_service(&lif->adminqcq->cq, budget, 891 ionic_adminq_service, NULL, NULL); 892 893 work_done = max(n_work, a_work); 894 if (work_done < budget && napi_complete_done(napi, work_done)) { 895 flags |= IONIC_INTR_CRED_UNMASK; 896 lif->adminqcq->cq.bound_intr->rearm_count++; 897 } 898 899 if (work_done || flags) { 900 flags |= IONIC_INTR_CRED_RESET_COALESCE; 901 ionic_intr_credits(idev->intr_ctrl, 902 intr->index, 903 n_work + a_work, flags); 904 } 905 906 return work_done; 907 } 908 909 void ionic_get_stats64(struct net_device *netdev, 910 struct rtnl_link_stats64 *ns) 911 { 912 struct ionic_lif *lif = netdev_priv(netdev); 913 struct ionic_lif_stats *ls; 914 915 memset(ns, 0, sizeof(*ns)); 916 ls = &lif->info->stats; 917 918 ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) + 919 le64_to_cpu(ls->rx_mcast_packets) + 920 le64_to_cpu(ls->rx_bcast_packets); 921 922 ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) + 923 le64_to_cpu(ls->tx_mcast_packets) + 924 le64_to_cpu(ls->tx_bcast_packets); 925 926 ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) + 927 le64_to_cpu(ls->rx_mcast_bytes) + 928 le64_to_cpu(ls->rx_bcast_bytes); 929 930 ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) + 931 le64_to_cpu(ls->tx_mcast_bytes) + 932 le64_to_cpu(ls->tx_bcast_bytes); 933 934 ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) + 935 le64_to_cpu(ls->rx_mcast_drop_packets) + 936 le64_to_cpu(ls->rx_bcast_drop_packets); 937 938 ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) + 939 le64_to_cpu(ls->tx_mcast_drop_packets) + 940 le64_to_cpu(ls->tx_bcast_drop_packets); 941 942 ns->multicast = le64_to_cpu(ls->rx_mcast_packets); 943 944 ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty); 945 946 ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) + 947 le64_to_cpu(ls->rx_queue_disabled) + 948 le64_to_cpu(ls->rx_desc_fetch_error) + 949 le64_to_cpu(ls->rx_desc_data_error); 950 951 ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) + 952 le64_to_cpu(ls->tx_queue_disabled) + 953 le64_to_cpu(ls->tx_desc_fetch_error) + 954 le64_to_cpu(ls->tx_desc_data_error); 955 956 ns->rx_errors = ns->rx_over_errors + 957 ns->rx_missed_errors; 958 959 ns->tx_errors = ns->tx_aborted_errors; 960 } 961 962 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) 963 { 964 struct ionic_admin_ctx ctx = { 965 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 966 .cmd.rx_filter_add = { 967 .opcode = IONIC_CMD_RX_FILTER_ADD, 968 .lif_index = cpu_to_le16(lif->index), 969 .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), 970 }, 971 }; 972 struct ionic_rx_filter *f; 973 int err; 974 975 /* don't bother if we already have it */ 976 spin_lock_bh(&lif->rx_filters.lock); 977 f = ionic_rx_filter_by_addr(lif, addr); 978 spin_unlock_bh(&lif->rx_filters.lock); 979 if (f) 980 return 0; 981 982 netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr); 983 984 memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); 985 err = ionic_adminq_post_wait(lif, &ctx); 986 if (err && err != -EEXIST) 987 return err; 988 989 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); 990 } 991 992 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) 993 { 994 struct ionic_admin_ctx ctx = { 995 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 996 .cmd.rx_filter_del = { 997 .opcode = IONIC_CMD_RX_FILTER_DEL, 998 .lif_index = cpu_to_le16(lif->index), 999 }, 1000 }; 1001 struct ionic_rx_filter *f; 1002 int err; 1003 1004 spin_lock_bh(&lif->rx_filters.lock); 1005 f = ionic_rx_filter_by_addr(lif, addr); 1006 if (!f) { 1007 spin_unlock_bh(&lif->rx_filters.lock); 1008 return -ENOENT; 1009 } 1010 1011 netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", 1012 addr, f->filter_id); 1013 1014 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); 1015 ionic_rx_filter_free(lif, f); 1016 spin_unlock_bh(&lif->rx_filters.lock); 1017 1018 err = ionic_adminq_post_wait(lif, &ctx); 1019 if (err && err != -EEXIST) 1020 return err; 1021 1022 return 0; 1023 } 1024 1025 static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add, 1026 bool can_sleep) 1027 { 1028 struct ionic_deferred_work *work; 1029 unsigned int nmfilters; 1030 unsigned int nufilters; 1031 1032 if (add) { 1033 /* Do we have space for this filter? We test the counters 1034 * here before checking the need for deferral so that we 1035 * can return an overflow error to the stack. 1036 */ 1037 nmfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters); 1038 nufilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); 1039 1040 if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)) 1041 lif->nmcast++; 1042 else if (!is_multicast_ether_addr(addr) && 1043 lif->nucast < nufilters) 1044 lif->nucast++; 1045 else 1046 return -ENOSPC; 1047 } else { 1048 if (is_multicast_ether_addr(addr) && lif->nmcast) 1049 lif->nmcast--; 1050 else if (!is_multicast_ether_addr(addr) && lif->nucast) 1051 lif->nucast--; 1052 } 1053 1054 if (!can_sleep) { 1055 work = kzalloc(sizeof(*work), GFP_ATOMIC); 1056 if (!work) 1057 return -ENOMEM; 1058 work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD : 1059 IONIC_DW_TYPE_RX_ADDR_DEL; 1060 memcpy(work->addr, addr, ETH_ALEN); 1061 netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n", 1062 add ? "add" : "del", addr); 1063 ionic_lif_deferred_enqueue(&lif->deferred, work); 1064 } else { 1065 netdev_dbg(lif->netdev, "rx_filter %s %pM\n", 1066 add ? "add" : "del", addr); 1067 if (add) 1068 return ionic_lif_addr_add(lif, addr); 1069 else 1070 return ionic_lif_addr_del(lif, addr); 1071 } 1072 1073 return 0; 1074 } 1075 1076 static int ionic_addr_add(struct net_device *netdev, const u8 *addr) 1077 { 1078 return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_SLEEP); 1079 } 1080 1081 static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr) 1082 { 1083 return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_NOT_SLEEP); 1084 } 1085 1086 static int ionic_addr_del(struct net_device *netdev, const u8 *addr) 1087 { 1088 return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_SLEEP); 1089 } 1090 1091 static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr) 1092 { 1093 return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_NOT_SLEEP); 1094 } 1095 1096 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) 1097 { 1098 struct ionic_admin_ctx ctx = { 1099 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1100 .cmd.rx_mode_set = { 1101 .opcode = IONIC_CMD_RX_MODE_SET, 1102 .lif_index = cpu_to_le16(lif->index), 1103 .rx_mode = cpu_to_le16(rx_mode), 1104 }, 1105 }; 1106 char buf[128]; 1107 int err; 1108 int i; 1109 #define REMAIN(__x) (sizeof(buf) - (__x)) 1110 1111 i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", 1112 lif->rx_mode, rx_mode); 1113 if (rx_mode & IONIC_RX_MODE_F_UNICAST) 1114 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); 1115 if (rx_mode & IONIC_RX_MODE_F_MULTICAST) 1116 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); 1117 if (rx_mode & IONIC_RX_MODE_F_BROADCAST) 1118 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); 1119 if (rx_mode & IONIC_RX_MODE_F_PROMISC) 1120 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); 1121 if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) 1122 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); 1123 netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf); 1124 1125 err = ionic_adminq_post_wait(lif, &ctx); 1126 if (err) 1127 netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n", 1128 rx_mode, err); 1129 else 1130 lif->rx_mode = rx_mode; 1131 } 1132 1133 static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep) 1134 { 1135 struct ionic_lif *lif = netdev_priv(netdev); 1136 struct ionic_deferred_work *work; 1137 unsigned int nfilters; 1138 unsigned int rx_mode; 1139 1140 rx_mode = IONIC_RX_MODE_F_UNICAST; 1141 rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0; 1142 rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0; 1143 rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; 1144 rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0; 1145 1146 /* sync unicast addresses 1147 * next check to see if we're in an overflow state 1148 * if so, we track that we overflowed and enable NIC PROMISC 1149 * else if the overflow is set and not needed 1150 * we remove our overflow flag and check the netdev flags 1151 * to see if we can disable NIC PROMISC 1152 */ 1153 if (can_sleep) 1154 __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); 1155 else 1156 __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del); 1157 nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); 1158 if (netdev_uc_count(netdev) + 1 > nfilters) { 1159 rx_mode |= IONIC_RX_MODE_F_PROMISC; 1160 lif->uc_overflow = true; 1161 } else if (lif->uc_overflow) { 1162 lif->uc_overflow = false; 1163 if (!(netdev->flags & IFF_PROMISC)) 1164 rx_mode &= ~IONIC_RX_MODE_F_PROMISC; 1165 } 1166 1167 /* same for multicast */ 1168 if (can_sleep) 1169 __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); 1170 else 1171 __dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del); 1172 nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters); 1173 if (netdev_mc_count(netdev) > nfilters) { 1174 rx_mode |= IONIC_RX_MODE_F_ALLMULTI; 1175 lif->mc_overflow = true; 1176 } else if (lif->mc_overflow) { 1177 lif->mc_overflow = false; 1178 if (!(netdev->flags & IFF_ALLMULTI)) 1179 rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; 1180 } 1181 1182 if (lif->rx_mode != rx_mode) { 1183 if (!can_sleep) { 1184 work = kzalloc(sizeof(*work), GFP_ATOMIC); 1185 if (!work) { 1186 netdev_err(lif->netdev, "rxmode change dropped\n"); 1187 return; 1188 } 1189 work->type = IONIC_DW_TYPE_RX_MODE; 1190 work->rx_mode = rx_mode; 1191 netdev_dbg(lif->netdev, "deferred: rx_mode\n"); 1192 ionic_lif_deferred_enqueue(&lif->deferred, work); 1193 } else { 1194 ionic_lif_rx_mode(lif, rx_mode); 1195 } 1196 } 1197 } 1198 1199 static void ionic_ndo_set_rx_mode(struct net_device *netdev) 1200 { 1201 ionic_set_rx_mode(netdev, CAN_NOT_SLEEP); 1202 } 1203 1204 static __le64 ionic_netdev_features_to_nic(netdev_features_t features) 1205 { 1206 u64 wanted = 0; 1207 1208 if (features & NETIF_F_HW_VLAN_CTAG_TX) 1209 wanted |= IONIC_ETH_HW_VLAN_TX_TAG; 1210 if (features & NETIF_F_HW_VLAN_CTAG_RX) 1211 wanted |= IONIC_ETH_HW_VLAN_RX_STRIP; 1212 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) 1213 wanted |= IONIC_ETH_HW_VLAN_RX_FILTER; 1214 if (features & NETIF_F_RXHASH) 1215 wanted |= IONIC_ETH_HW_RX_HASH; 1216 if (features & NETIF_F_RXCSUM) 1217 wanted |= IONIC_ETH_HW_RX_CSUM; 1218 if (features & NETIF_F_SG) 1219 wanted |= IONIC_ETH_HW_TX_SG; 1220 if (features & NETIF_F_HW_CSUM) 1221 wanted |= IONIC_ETH_HW_TX_CSUM; 1222 if (features & NETIF_F_TSO) 1223 wanted |= IONIC_ETH_HW_TSO; 1224 if (features & NETIF_F_TSO6) 1225 wanted |= IONIC_ETH_HW_TSO_IPV6; 1226 if (features & NETIF_F_TSO_ECN) 1227 wanted |= IONIC_ETH_HW_TSO_ECN; 1228 if (features & NETIF_F_GSO_GRE) 1229 wanted |= IONIC_ETH_HW_TSO_GRE; 1230 if (features & NETIF_F_GSO_GRE_CSUM) 1231 wanted |= IONIC_ETH_HW_TSO_GRE_CSUM; 1232 if (features & NETIF_F_GSO_IPXIP4) 1233 wanted |= IONIC_ETH_HW_TSO_IPXIP4; 1234 if (features & NETIF_F_GSO_IPXIP6) 1235 wanted |= IONIC_ETH_HW_TSO_IPXIP6; 1236 if (features & NETIF_F_GSO_UDP_TUNNEL) 1237 wanted |= IONIC_ETH_HW_TSO_UDP; 1238 if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) 1239 wanted |= IONIC_ETH_HW_TSO_UDP_CSUM; 1240 1241 return cpu_to_le64(wanted); 1242 } 1243 1244 static int ionic_set_nic_features(struct ionic_lif *lif, 1245 netdev_features_t features) 1246 { 1247 struct device *dev = lif->ionic->dev; 1248 struct ionic_admin_ctx ctx = { 1249 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1250 .cmd.lif_setattr = { 1251 .opcode = IONIC_CMD_LIF_SETATTR, 1252 .index = cpu_to_le16(lif->index), 1253 .attr = IONIC_LIF_ATTR_FEATURES, 1254 }, 1255 }; 1256 u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG | 1257 IONIC_ETH_HW_VLAN_RX_STRIP | 1258 IONIC_ETH_HW_VLAN_RX_FILTER; 1259 u64 old_hw_features; 1260 int err; 1261 1262 ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features); 1263 err = ionic_adminq_post_wait(lif, &ctx); 1264 if (err) 1265 return err; 1266 1267 old_hw_features = lif->hw_features; 1268 lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features & 1269 ctx.comp.lif_setattr.features); 1270 1271 if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH) 1272 ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); 1273 1274 if ((vlan_flags & features) && 1275 !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features))) 1276 dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n"); 1277 1278 if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG) 1279 dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n"); 1280 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) 1281 dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n"); 1282 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) 1283 dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n"); 1284 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) 1285 dev_dbg(dev, "feature ETH_HW_RX_HASH\n"); 1286 if (lif->hw_features & IONIC_ETH_HW_TX_SG) 1287 dev_dbg(dev, "feature ETH_HW_TX_SG\n"); 1288 if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) 1289 dev_dbg(dev, "feature ETH_HW_TX_CSUM\n"); 1290 if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) 1291 dev_dbg(dev, "feature ETH_HW_RX_CSUM\n"); 1292 if (lif->hw_features & IONIC_ETH_HW_TSO) 1293 dev_dbg(dev, "feature ETH_HW_TSO\n"); 1294 if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) 1295 dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n"); 1296 if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) 1297 dev_dbg(dev, "feature ETH_HW_TSO_ECN\n"); 1298 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) 1299 dev_dbg(dev, "feature ETH_HW_TSO_GRE\n"); 1300 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) 1301 dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n"); 1302 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) 1303 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n"); 1304 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) 1305 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n"); 1306 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) 1307 dev_dbg(dev, "feature ETH_HW_TSO_UDP\n"); 1308 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) 1309 dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n"); 1310 1311 return 0; 1312 } 1313 1314 static int ionic_init_nic_features(struct ionic_lif *lif) 1315 { 1316 struct net_device *netdev = lif->netdev; 1317 netdev_features_t features; 1318 int err; 1319 1320 /* set up what we expect to support by default */ 1321 features = NETIF_F_HW_VLAN_CTAG_TX | 1322 NETIF_F_HW_VLAN_CTAG_RX | 1323 NETIF_F_HW_VLAN_CTAG_FILTER | 1324 NETIF_F_RXHASH | 1325 NETIF_F_SG | 1326 NETIF_F_HW_CSUM | 1327 NETIF_F_RXCSUM | 1328 NETIF_F_TSO | 1329 NETIF_F_TSO6 | 1330 NETIF_F_TSO_ECN; 1331 1332 err = ionic_set_nic_features(lif, features); 1333 if (err) 1334 return err; 1335 1336 /* tell the netdev what we actually can support */ 1337 netdev->features |= NETIF_F_HIGHDMA; 1338 1339 if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG) 1340 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; 1341 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) 1342 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; 1343 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) 1344 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1345 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) 1346 netdev->hw_features |= NETIF_F_RXHASH; 1347 if (lif->hw_features & IONIC_ETH_HW_TX_SG) 1348 netdev->hw_features |= NETIF_F_SG; 1349 1350 if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) 1351 netdev->hw_enc_features |= NETIF_F_HW_CSUM; 1352 if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) 1353 netdev->hw_enc_features |= NETIF_F_RXCSUM; 1354 if (lif->hw_features & IONIC_ETH_HW_TSO) 1355 netdev->hw_enc_features |= NETIF_F_TSO; 1356 if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) 1357 netdev->hw_enc_features |= NETIF_F_TSO6; 1358 if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) 1359 netdev->hw_enc_features |= NETIF_F_TSO_ECN; 1360 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) 1361 netdev->hw_enc_features |= NETIF_F_GSO_GRE; 1362 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) 1363 netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM; 1364 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) 1365 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4; 1366 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) 1367 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6; 1368 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) 1369 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; 1370 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) 1371 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 1372 1373 netdev->hw_features |= netdev->hw_enc_features; 1374 netdev->features |= netdev->hw_features; 1375 netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES; 1376 1377 netdev->priv_flags |= IFF_UNICAST_FLT | 1378 IFF_LIVE_ADDR_CHANGE; 1379 1380 return 0; 1381 } 1382 1383 static int ionic_set_features(struct net_device *netdev, 1384 netdev_features_t features) 1385 { 1386 struct ionic_lif *lif = netdev_priv(netdev); 1387 int err; 1388 1389 netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n", 1390 __func__, (u64)lif->netdev->features, (u64)features); 1391 1392 err = ionic_set_nic_features(lif, features); 1393 1394 return err; 1395 } 1396 1397 static int ionic_set_mac_address(struct net_device *netdev, void *sa) 1398 { 1399 struct sockaddr *addr = sa; 1400 u8 *mac; 1401 int err; 1402 1403 mac = (u8 *)addr->sa_data; 1404 if (ether_addr_equal(netdev->dev_addr, mac)) 1405 return 0; 1406 1407 err = eth_prepare_mac_addr_change(netdev, addr); 1408 if (err) 1409 return err; 1410 1411 if (!is_zero_ether_addr(netdev->dev_addr)) { 1412 netdev_info(netdev, "deleting mac addr %pM\n", 1413 netdev->dev_addr); 1414 ionic_addr_del(netdev, netdev->dev_addr); 1415 } 1416 1417 eth_commit_mac_addr_change(netdev, addr); 1418 netdev_info(netdev, "updating mac addr %pM\n", mac); 1419 1420 return ionic_addr_add(netdev, mac); 1421 } 1422 1423 static void ionic_stop_queues_reconfig(struct ionic_lif *lif) 1424 { 1425 /* Stop and clean the queues before reconfiguration */ 1426 mutex_lock(&lif->queue_lock); 1427 netif_device_detach(lif->netdev); 1428 ionic_stop_queues(lif); 1429 ionic_txrx_deinit(lif); 1430 } 1431 1432 static int ionic_start_queues_reconfig(struct ionic_lif *lif) 1433 { 1434 int err; 1435 1436 /* Re-init the queues after reconfiguration */ 1437 1438 /* The only way txrx_init can fail here is if communication 1439 * with FW is suddenly broken. There's not much we can do 1440 * at this point - error messages have already been printed, 1441 * so we can continue on and the user can eventually do a 1442 * DOWN and UP to try to reset and clear the issue. 1443 */ 1444 err = ionic_txrx_init(lif); 1445 mutex_unlock(&lif->queue_lock); 1446 ionic_link_status_check_request(lif, true); 1447 netif_device_attach(lif->netdev); 1448 1449 return err; 1450 } 1451 1452 static int ionic_change_mtu(struct net_device *netdev, int new_mtu) 1453 { 1454 struct ionic_lif *lif = netdev_priv(netdev); 1455 struct ionic_admin_ctx ctx = { 1456 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1457 .cmd.lif_setattr = { 1458 .opcode = IONIC_CMD_LIF_SETATTR, 1459 .index = cpu_to_le16(lif->index), 1460 .attr = IONIC_LIF_ATTR_MTU, 1461 .mtu = cpu_to_le32(new_mtu), 1462 }, 1463 }; 1464 int err; 1465 1466 err = ionic_adminq_post_wait(lif, &ctx); 1467 if (err) 1468 return err; 1469 1470 /* if we're not running, nothing more to do */ 1471 if (!netif_running(netdev)) { 1472 netdev->mtu = new_mtu; 1473 return 0; 1474 } 1475 1476 ionic_stop_queues_reconfig(lif); 1477 netdev->mtu = new_mtu; 1478 return ionic_start_queues_reconfig(lif); 1479 } 1480 1481 static void ionic_tx_timeout_work(struct work_struct *ws) 1482 { 1483 struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work); 1484 1485 netdev_info(lif->netdev, "Tx Timeout recovery\n"); 1486 1487 /* if we were stopped before this scheduled job was launched, 1488 * don't bother the queues as they are already stopped. 1489 */ 1490 if (!netif_running(lif->netdev)) 1491 return; 1492 1493 ionic_stop_queues_reconfig(lif); 1494 ionic_start_queues_reconfig(lif); 1495 } 1496 1497 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue) 1498 { 1499 struct ionic_lif *lif = netdev_priv(netdev); 1500 1501 schedule_work(&lif->tx_timeout_work); 1502 } 1503 1504 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, 1505 u16 vid) 1506 { 1507 struct ionic_lif *lif = netdev_priv(netdev); 1508 struct ionic_admin_ctx ctx = { 1509 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1510 .cmd.rx_filter_add = { 1511 .opcode = IONIC_CMD_RX_FILTER_ADD, 1512 .lif_index = cpu_to_le16(lif->index), 1513 .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN), 1514 .vlan.vlan = cpu_to_le16(vid), 1515 }, 1516 }; 1517 int err; 1518 1519 netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid); 1520 err = ionic_adminq_post_wait(lif, &ctx); 1521 if (err) 1522 return err; 1523 1524 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); 1525 } 1526 1527 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, 1528 u16 vid) 1529 { 1530 struct ionic_lif *lif = netdev_priv(netdev); 1531 struct ionic_admin_ctx ctx = { 1532 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1533 .cmd.rx_filter_del = { 1534 .opcode = IONIC_CMD_RX_FILTER_DEL, 1535 .lif_index = cpu_to_le16(lif->index), 1536 }, 1537 }; 1538 struct ionic_rx_filter *f; 1539 1540 spin_lock_bh(&lif->rx_filters.lock); 1541 1542 f = ionic_rx_filter_by_vlan(lif, vid); 1543 if (!f) { 1544 spin_unlock_bh(&lif->rx_filters.lock); 1545 return -ENOENT; 1546 } 1547 1548 netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", 1549 vid, f->filter_id); 1550 1551 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); 1552 ionic_rx_filter_free(lif, f); 1553 spin_unlock_bh(&lif->rx_filters.lock); 1554 1555 return ionic_adminq_post_wait(lif, &ctx); 1556 } 1557 1558 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types, 1559 const u8 *key, const u32 *indir) 1560 { 1561 struct ionic_admin_ctx ctx = { 1562 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1563 .cmd.lif_setattr = { 1564 .opcode = IONIC_CMD_LIF_SETATTR, 1565 .attr = IONIC_LIF_ATTR_RSS, 1566 .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa), 1567 }, 1568 }; 1569 unsigned int i, tbl_sz; 1570 1571 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) { 1572 lif->rss_types = types; 1573 ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types); 1574 } 1575 1576 if (key) 1577 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE); 1578 1579 if (indir) { 1580 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1581 for (i = 0; i < tbl_sz; i++) 1582 lif->rss_ind_tbl[i] = indir[i]; 1583 } 1584 1585 memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key, 1586 IONIC_RSS_HASH_KEY_SIZE); 1587 1588 return ionic_adminq_post_wait(lif, &ctx); 1589 } 1590 1591 static int ionic_lif_rss_init(struct ionic_lif *lif) 1592 { 1593 unsigned int tbl_sz; 1594 unsigned int i; 1595 1596 lif->rss_types = IONIC_RSS_TYPE_IPV4 | 1597 IONIC_RSS_TYPE_IPV4_TCP | 1598 IONIC_RSS_TYPE_IPV4_UDP | 1599 IONIC_RSS_TYPE_IPV6 | 1600 IONIC_RSS_TYPE_IPV6_TCP | 1601 IONIC_RSS_TYPE_IPV6_UDP; 1602 1603 /* Fill indirection table with 'default' values */ 1604 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1605 for (i = 0; i < tbl_sz; i++) 1606 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs); 1607 1608 return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL); 1609 } 1610 1611 static void ionic_lif_rss_deinit(struct ionic_lif *lif) 1612 { 1613 int tbl_sz; 1614 1615 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 1616 memset(lif->rss_ind_tbl, 0, tbl_sz); 1617 memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE); 1618 1619 ionic_lif_rss_config(lif, 0x0, NULL, NULL); 1620 } 1621 1622 static void ionic_lif_quiesce(struct ionic_lif *lif) 1623 { 1624 struct ionic_admin_ctx ctx = { 1625 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 1626 .cmd.lif_setattr = { 1627 .opcode = IONIC_CMD_LIF_SETATTR, 1628 .index = cpu_to_le16(lif->index), 1629 .attr = IONIC_LIF_ATTR_STATE, 1630 .state = IONIC_LIF_QUIESCE, 1631 }, 1632 }; 1633 int err; 1634 1635 err = ionic_adminq_post_wait(lif, &ctx); 1636 if (err) 1637 netdev_err(lif->netdev, "lif quiesce failed %d\n", err); 1638 } 1639 1640 static void ionic_txrx_disable(struct ionic_lif *lif) 1641 { 1642 unsigned int i; 1643 int err = 0; 1644 1645 if (lif->txqcqs) { 1646 for (i = 0; i < lif->nxqs; i++) 1647 err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT)); 1648 } 1649 1650 if (lif->rxqcqs) { 1651 for (i = 0; i < lif->nxqs; i++) 1652 err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT)); 1653 } 1654 1655 ionic_lif_quiesce(lif); 1656 } 1657 1658 static void ionic_txrx_deinit(struct ionic_lif *lif) 1659 { 1660 unsigned int i; 1661 1662 if (lif->txqcqs) { 1663 for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) { 1664 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 1665 ionic_tx_flush(&lif->txqcqs[i]->cq); 1666 ionic_tx_empty(&lif->txqcqs[i]->q); 1667 } 1668 } 1669 1670 if (lif->rxqcqs) { 1671 for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) { 1672 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); 1673 ionic_rx_empty(&lif->rxqcqs[i]->q); 1674 } 1675 } 1676 lif->rx_mode = 0; 1677 } 1678 1679 static void ionic_txrx_free(struct ionic_lif *lif) 1680 { 1681 unsigned int i; 1682 1683 if (lif->txqcqs) { 1684 for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) { 1685 ionic_qcq_free(lif, lif->txqcqs[i]); 1686 devm_kfree(lif->ionic->dev, lif->txqcqs[i]); 1687 lif->txqcqs[i] = NULL; 1688 } 1689 } 1690 1691 if (lif->rxqcqs) { 1692 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { 1693 ionic_qcq_free(lif, lif->rxqcqs[i]); 1694 devm_kfree(lif->ionic->dev, lif->rxqcqs[i]); 1695 lif->rxqcqs[i] = NULL; 1696 } 1697 } 1698 } 1699 1700 static int ionic_txrx_alloc(struct ionic_lif *lif) 1701 { 1702 unsigned int sg_desc_sz; 1703 unsigned int flags; 1704 unsigned int i; 1705 int err = 0; 1706 1707 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 1708 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == 1709 sizeof(struct ionic_txq_sg_desc_v1)) 1710 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 1711 else 1712 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 1713 1714 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; 1715 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 1716 flags |= IONIC_QCQ_F_INTR; 1717 for (i = 0; i < lif->nxqs; i++) { 1718 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 1719 lif->ntxq_descs, 1720 sizeof(struct ionic_txq_desc), 1721 sizeof(struct ionic_txq_comp), 1722 sg_desc_sz, 1723 lif->kern_pid, &lif->txqcqs[i]); 1724 if (err) 1725 goto err_out; 1726 1727 if (flags & IONIC_QCQ_F_INTR) { 1728 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 1729 lif->txqcqs[i]->intr.index, 1730 lif->tx_coalesce_hw); 1731 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 1732 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 1733 } 1734 1735 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 1736 } 1737 1738 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR; 1739 for (i = 0; i < lif->nxqs; i++) { 1740 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 1741 lif->nrxq_descs, 1742 sizeof(struct ionic_rxq_desc), 1743 sizeof(struct ionic_rxq_comp), 1744 sizeof(struct ionic_rxq_sg_desc), 1745 lif->kern_pid, &lif->rxqcqs[i]); 1746 if (err) 1747 goto err_out; 1748 1749 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 1750 lif->rxqcqs[i]->intr.index, 1751 lif->rx_coalesce_hw); 1752 if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state)) 1753 lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw; 1754 1755 if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) 1756 ionic_link_qcq_interrupts(lif->rxqcqs[i], 1757 lif->txqcqs[i]); 1758 1759 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 1760 } 1761 1762 return 0; 1763 1764 err_out: 1765 ionic_txrx_free(lif); 1766 1767 return err; 1768 } 1769 1770 static int ionic_txrx_init(struct ionic_lif *lif) 1771 { 1772 unsigned int i; 1773 int err; 1774 1775 for (i = 0; i < lif->nxqs; i++) { 1776 err = ionic_lif_txq_init(lif, lif->txqcqs[i]); 1777 if (err) 1778 goto err_out; 1779 1780 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]); 1781 if (err) { 1782 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 1783 goto err_out; 1784 } 1785 } 1786 1787 if (lif->netdev->features & NETIF_F_RXHASH) 1788 ionic_lif_rss_init(lif); 1789 1790 ionic_set_rx_mode(lif->netdev, CAN_SLEEP); 1791 1792 return 0; 1793 1794 err_out: 1795 while (i--) { 1796 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]); 1797 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]); 1798 } 1799 1800 return err; 1801 } 1802 1803 static int ionic_txrx_enable(struct ionic_lif *lif) 1804 { 1805 int derr = 0; 1806 int i, err; 1807 1808 for (i = 0; i < lif->nxqs; i++) { 1809 if (!(lif->rxqcqs[i] && lif->txqcqs[i])) { 1810 dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i); 1811 err = -ENXIO; 1812 goto err_out; 1813 } 1814 1815 ionic_rx_fill(&lif->rxqcqs[i]->q); 1816 err = ionic_qcq_enable(lif->rxqcqs[i]); 1817 if (err) 1818 goto err_out; 1819 1820 err = ionic_qcq_enable(lif->txqcqs[i]); 1821 if (err) { 1822 derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT)); 1823 goto err_out; 1824 } 1825 } 1826 1827 return 0; 1828 1829 err_out: 1830 while (i--) { 1831 derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT)); 1832 derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT)); 1833 } 1834 1835 return err; 1836 } 1837 1838 static int ionic_start_queues(struct ionic_lif *lif) 1839 { 1840 int err; 1841 1842 if (test_and_set_bit(IONIC_LIF_F_UP, lif->state)) 1843 return 0; 1844 1845 err = ionic_txrx_enable(lif); 1846 if (err) { 1847 clear_bit(IONIC_LIF_F_UP, lif->state); 1848 return err; 1849 } 1850 netif_tx_wake_all_queues(lif->netdev); 1851 1852 return 0; 1853 } 1854 1855 static int ionic_open(struct net_device *netdev) 1856 { 1857 struct ionic_lif *lif = netdev_priv(netdev); 1858 int err; 1859 1860 err = ionic_txrx_alloc(lif); 1861 if (err) 1862 return err; 1863 1864 err = ionic_txrx_init(lif); 1865 if (err) 1866 goto err_out; 1867 1868 err = netif_set_real_num_tx_queues(netdev, lif->nxqs); 1869 if (err) 1870 goto err_txrx_deinit; 1871 1872 err = netif_set_real_num_rx_queues(netdev, lif->nxqs); 1873 if (err) 1874 goto err_txrx_deinit; 1875 1876 /* don't start the queues until we have link */ 1877 if (netif_carrier_ok(netdev)) { 1878 err = ionic_start_queues(lif); 1879 if (err) 1880 goto err_txrx_deinit; 1881 } 1882 1883 return 0; 1884 1885 err_txrx_deinit: 1886 ionic_txrx_deinit(lif); 1887 err_out: 1888 ionic_txrx_free(lif); 1889 return err; 1890 } 1891 1892 static void ionic_stop_queues(struct ionic_lif *lif) 1893 { 1894 if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state)) 1895 return; 1896 1897 netif_tx_disable(lif->netdev); 1898 ionic_txrx_disable(lif); 1899 } 1900 1901 static int ionic_stop(struct net_device *netdev) 1902 { 1903 struct ionic_lif *lif = netdev_priv(netdev); 1904 1905 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 1906 return 0; 1907 1908 ionic_stop_queues(lif); 1909 ionic_txrx_deinit(lif); 1910 ionic_txrx_free(lif); 1911 1912 return 0; 1913 } 1914 1915 static int ionic_get_vf_config(struct net_device *netdev, 1916 int vf, struct ifla_vf_info *ivf) 1917 { 1918 struct ionic_lif *lif = netdev_priv(netdev); 1919 struct ionic *ionic = lif->ionic; 1920 int ret = 0; 1921 1922 if (!netif_device_present(netdev)) 1923 return -EBUSY; 1924 1925 down_read(&ionic->vf_op_lock); 1926 1927 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 1928 ret = -EINVAL; 1929 } else { 1930 ivf->vf = vf; 1931 ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid); 1932 ivf->qos = 0; 1933 ivf->spoofchk = ionic->vfs[vf].spoofchk; 1934 ivf->linkstate = ionic->vfs[vf].linkstate; 1935 ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate); 1936 ivf->trusted = ionic->vfs[vf].trusted; 1937 ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr); 1938 } 1939 1940 up_read(&ionic->vf_op_lock); 1941 return ret; 1942 } 1943 1944 static int ionic_get_vf_stats(struct net_device *netdev, int vf, 1945 struct ifla_vf_stats *vf_stats) 1946 { 1947 struct ionic_lif *lif = netdev_priv(netdev); 1948 struct ionic *ionic = lif->ionic; 1949 struct ionic_lif_stats *vs; 1950 int ret = 0; 1951 1952 if (!netif_device_present(netdev)) 1953 return -EBUSY; 1954 1955 down_read(&ionic->vf_op_lock); 1956 1957 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 1958 ret = -EINVAL; 1959 } else { 1960 memset(vf_stats, 0, sizeof(*vf_stats)); 1961 vs = &ionic->vfs[vf].stats; 1962 1963 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets); 1964 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets); 1965 vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes); 1966 vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes); 1967 vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets); 1968 vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets); 1969 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) + 1970 le64_to_cpu(vs->rx_mcast_drop_packets) + 1971 le64_to_cpu(vs->rx_bcast_drop_packets); 1972 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) + 1973 le64_to_cpu(vs->tx_mcast_drop_packets) + 1974 le64_to_cpu(vs->tx_bcast_drop_packets); 1975 } 1976 1977 up_read(&ionic->vf_op_lock); 1978 return ret; 1979 } 1980 1981 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 1982 { 1983 struct ionic_lif *lif = netdev_priv(netdev); 1984 struct ionic *ionic = lif->ionic; 1985 int ret; 1986 1987 if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac))) 1988 return -EINVAL; 1989 1990 if (!netif_device_present(netdev)) 1991 return -EBUSY; 1992 1993 down_write(&ionic->vf_op_lock); 1994 1995 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 1996 ret = -EINVAL; 1997 } else { 1998 ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac); 1999 if (!ret) 2000 ether_addr_copy(ionic->vfs[vf].macaddr, mac); 2001 } 2002 2003 up_write(&ionic->vf_op_lock); 2004 return ret; 2005 } 2006 2007 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, 2008 u8 qos, __be16 proto) 2009 { 2010 struct ionic_lif *lif = netdev_priv(netdev); 2011 struct ionic *ionic = lif->ionic; 2012 int ret; 2013 2014 /* until someday when we support qos */ 2015 if (qos) 2016 return -EINVAL; 2017 2018 if (vlan > 4095) 2019 return -EINVAL; 2020 2021 if (proto != htons(ETH_P_8021Q)) 2022 return -EPROTONOSUPPORT; 2023 2024 if (!netif_device_present(netdev)) 2025 return -EBUSY; 2026 2027 down_write(&ionic->vf_op_lock); 2028 2029 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2030 ret = -EINVAL; 2031 } else { 2032 ret = ionic_set_vf_config(ionic, vf, 2033 IONIC_VF_ATTR_VLAN, (u8 *)&vlan); 2034 if (!ret) 2035 ionic->vfs[vf].vlanid = cpu_to_le16(vlan); 2036 } 2037 2038 up_write(&ionic->vf_op_lock); 2039 return ret; 2040 } 2041 2042 static int ionic_set_vf_rate(struct net_device *netdev, int vf, 2043 int tx_min, int tx_max) 2044 { 2045 struct ionic_lif *lif = netdev_priv(netdev); 2046 struct ionic *ionic = lif->ionic; 2047 int ret; 2048 2049 /* setting the min just seems silly */ 2050 if (tx_min) 2051 return -EINVAL; 2052 2053 if (!netif_device_present(netdev)) 2054 return -EBUSY; 2055 2056 down_write(&ionic->vf_op_lock); 2057 2058 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2059 ret = -EINVAL; 2060 } else { 2061 ret = ionic_set_vf_config(ionic, vf, 2062 IONIC_VF_ATTR_RATE, (u8 *)&tx_max); 2063 if (!ret) 2064 lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max); 2065 } 2066 2067 up_write(&ionic->vf_op_lock); 2068 return ret; 2069 } 2070 2071 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set) 2072 { 2073 struct ionic_lif *lif = netdev_priv(netdev); 2074 struct ionic *ionic = lif->ionic; 2075 u8 data = set; /* convert to u8 for config */ 2076 int ret; 2077 2078 if (!netif_device_present(netdev)) 2079 return -EBUSY; 2080 2081 down_write(&ionic->vf_op_lock); 2082 2083 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2084 ret = -EINVAL; 2085 } else { 2086 ret = ionic_set_vf_config(ionic, vf, 2087 IONIC_VF_ATTR_SPOOFCHK, &data); 2088 if (!ret) 2089 ionic->vfs[vf].spoofchk = data; 2090 } 2091 2092 up_write(&ionic->vf_op_lock); 2093 return ret; 2094 } 2095 2096 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set) 2097 { 2098 struct ionic_lif *lif = netdev_priv(netdev); 2099 struct ionic *ionic = lif->ionic; 2100 u8 data = set; /* convert to u8 for config */ 2101 int ret; 2102 2103 if (!netif_device_present(netdev)) 2104 return -EBUSY; 2105 2106 down_write(&ionic->vf_op_lock); 2107 2108 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2109 ret = -EINVAL; 2110 } else { 2111 ret = ionic_set_vf_config(ionic, vf, 2112 IONIC_VF_ATTR_TRUST, &data); 2113 if (!ret) 2114 ionic->vfs[vf].trusted = data; 2115 } 2116 2117 up_write(&ionic->vf_op_lock); 2118 return ret; 2119 } 2120 2121 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set) 2122 { 2123 struct ionic_lif *lif = netdev_priv(netdev); 2124 struct ionic *ionic = lif->ionic; 2125 u8 data; 2126 int ret; 2127 2128 switch (set) { 2129 case IFLA_VF_LINK_STATE_ENABLE: 2130 data = IONIC_VF_LINK_STATUS_UP; 2131 break; 2132 case IFLA_VF_LINK_STATE_DISABLE: 2133 data = IONIC_VF_LINK_STATUS_DOWN; 2134 break; 2135 case IFLA_VF_LINK_STATE_AUTO: 2136 data = IONIC_VF_LINK_STATUS_AUTO; 2137 break; 2138 default: 2139 return -EINVAL; 2140 } 2141 2142 if (!netif_device_present(netdev)) 2143 return -EBUSY; 2144 2145 down_write(&ionic->vf_op_lock); 2146 2147 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) { 2148 ret = -EINVAL; 2149 } else { 2150 ret = ionic_set_vf_config(ionic, vf, 2151 IONIC_VF_ATTR_LINKSTATE, &data); 2152 if (!ret) 2153 ionic->vfs[vf].linkstate = set; 2154 } 2155 2156 up_write(&ionic->vf_op_lock); 2157 return ret; 2158 } 2159 2160 static const struct net_device_ops ionic_netdev_ops = { 2161 .ndo_open = ionic_open, 2162 .ndo_stop = ionic_stop, 2163 .ndo_start_xmit = ionic_start_xmit, 2164 .ndo_get_stats64 = ionic_get_stats64, 2165 .ndo_set_rx_mode = ionic_ndo_set_rx_mode, 2166 .ndo_set_features = ionic_set_features, 2167 .ndo_set_mac_address = ionic_set_mac_address, 2168 .ndo_validate_addr = eth_validate_addr, 2169 .ndo_tx_timeout = ionic_tx_timeout, 2170 .ndo_change_mtu = ionic_change_mtu, 2171 .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid, 2172 .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid, 2173 .ndo_set_vf_vlan = ionic_set_vf_vlan, 2174 .ndo_set_vf_trust = ionic_set_vf_trust, 2175 .ndo_set_vf_mac = ionic_set_vf_mac, 2176 .ndo_set_vf_rate = ionic_set_vf_rate, 2177 .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk, 2178 .ndo_get_vf_config = ionic_get_vf_config, 2179 .ndo_set_vf_link_state = ionic_set_vf_link_state, 2180 .ndo_get_vf_stats = ionic_get_vf_stats, 2181 }; 2182 2183 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) 2184 { 2185 /* only swapping the queues, not the napi, flags, or other stuff */ 2186 swap(a->q.num_descs, b->q.num_descs); 2187 swap(a->q.base, b->q.base); 2188 swap(a->q.base_pa, b->q.base_pa); 2189 swap(a->q.info, b->q.info); 2190 swap(a->q_base, b->q_base); 2191 swap(a->q_base_pa, b->q_base_pa); 2192 swap(a->q_size, b->q_size); 2193 2194 swap(a->q.sg_base, b->q.sg_base); 2195 swap(a->q.sg_base_pa, b->q.sg_base_pa); 2196 swap(a->sg_base, b->sg_base); 2197 swap(a->sg_base_pa, b->sg_base_pa); 2198 swap(a->sg_size, b->sg_size); 2199 2200 swap(a->cq.num_descs, b->cq.num_descs); 2201 swap(a->cq.base, b->cq.base); 2202 swap(a->cq.base_pa, b->cq.base_pa); 2203 swap(a->cq.info, b->cq.info); 2204 swap(a->cq_base, b->cq_base); 2205 swap(a->cq_base_pa, b->cq_base_pa); 2206 swap(a->cq_size, b->cq_size); 2207 2208 ionic_debugfs_del_qcq(a); 2209 ionic_debugfs_add_qcq(a->q.lif, a); 2210 } 2211 2212 int ionic_reconfigure_queues(struct ionic_lif *lif, 2213 struct ionic_queue_params *qparam) 2214 { 2215 struct ionic_qcq **tx_qcqs = NULL; 2216 struct ionic_qcq **rx_qcqs = NULL; 2217 unsigned int sg_desc_sz; 2218 unsigned int flags; 2219 int err = -ENOMEM; 2220 unsigned int i; 2221 2222 /* allocate temporary qcq arrays to hold new queue structs */ 2223 if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) { 2224 tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif, 2225 sizeof(struct ionic_qcq *), GFP_KERNEL); 2226 if (!tx_qcqs) 2227 goto err_out; 2228 } 2229 if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs) { 2230 rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif, 2231 sizeof(struct ionic_qcq *), GFP_KERNEL); 2232 if (!rx_qcqs) 2233 goto err_out; 2234 } 2235 2236 /* allocate new desc_info and rings, but leave the interrupt setup 2237 * until later so as to not mess with the still-running queues 2238 */ 2239 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 && 2240 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == 2241 sizeof(struct ionic_txq_sg_desc_v1)) 2242 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1); 2243 else 2244 sg_desc_sz = sizeof(struct ionic_txq_sg_desc); 2245 2246 if (tx_qcqs) { 2247 for (i = 0; i < qparam->nxqs; i++) { 2248 flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 2249 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, 2250 qparam->ntxq_descs, 2251 sizeof(struct ionic_txq_desc), 2252 sizeof(struct ionic_txq_comp), 2253 sg_desc_sz, 2254 lif->kern_pid, &tx_qcqs[i]); 2255 if (err) 2256 goto err_out; 2257 } 2258 } 2259 2260 if (rx_qcqs) { 2261 for (i = 0; i < qparam->nxqs; i++) { 2262 flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR; 2263 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, 2264 qparam->nrxq_descs, 2265 sizeof(struct ionic_rxq_desc), 2266 sizeof(struct ionic_rxq_comp), 2267 sizeof(struct ionic_rxq_sg_desc), 2268 lif->kern_pid, &rx_qcqs[i]); 2269 if (err) 2270 goto err_out; 2271 } 2272 } 2273 2274 /* stop and clean the queues */ 2275 ionic_stop_queues_reconfig(lif); 2276 2277 if (qparam->nxqs != lif->nxqs) { 2278 err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs); 2279 if (err) 2280 goto err_out_reinit_unlock; 2281 err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs); 2282 if (err) { 2283 netif_set_real_num_tx_queues(lif->netdev, lif->nxqs); 2284 goto err_out_reinit_unlock; 2285 } 2286 } 2287 2288 /* swap new desc_info and rings, keeping existing interrupt config */ 2289 if (tx_qcqs) { 2290 lif->ntxq_descs = qparam->ntxq_descs; 2291 for (i = 0; i < qparam->nxqs; i++) 2292 ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]); 2293 } 2294 2295 if (rx_qcqs) { 2296 lif->nrxq_descs = qparam->nrxq_descs; 2297 for (i = 0; i < qparam->nxqs; i++) 2298 ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]); 2299 } 2300 2301 /* if we need to change the interrupt layout, this is the time */ 2302 if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) || 2303 qparam->nxqs != lif->nxqs) { 2304 if (qparam->intr_split) { 2305 set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); 2306 } else { 2307 clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); 2308 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 2309 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 2310 } 2311 2312 /* clear existing interrupt assignments */ 2313 for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) { 2314 ionic_qcq_intr_free(lif, lif->txqcqs[i]); 2315 ionic_qcq_intr_free(lif, lif->rxqcqs[i]); 2316 } 2317 2318 /* re-assign the interrupts */ 2319 for (i = 0; i < qparam->nxqs; i++) { 2320 lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR; 2321 err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]); 2322 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2323 lif->rxqcqs[i]->intr.index, 2324 lif->rx_coalesce_hw); 2325 2326 if (qparam->intr_split) { 2327 lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR; 2328 err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]); 2329 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, 2330 lif->txqcqs[i]->intr.index, 2331 lif->tx_coalesce_hw); 2332 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state)) 2333 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw; 2334 } else { 2335 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 2336 ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]); 2337 } 2338 } 2339 } 2340 2341 /* now we can rework the debugfs mappings */ 2342 if (tx_qcqs) { 2343 for (i = 0; i < qparam->nxqs; i++) { 2344 ionic_debugfs_del_qcq(lif->txqcqs[i]); 2345 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]); 2346 } 2347 } 2348 2349 if (rx_qcqs) { 2350 for (i = 0; i < qparam->nxqs; i++) { 2351 ionic_debugfs_del_qcq(lif->rxqcqs[i]); 2352 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]); 2353 } 2354 } 2355 2356 swap(lif->nxqs, qparam->nxqs); 2357 2358 err_out_reinit_unlock: 2359 /* re-init the queues, but don't loose an error code */ 2360 if (err) 2361 ionic_start_queues_reconfig(lif); 2362 else 2363 err = ionic_start_queues_reconfig(lif); 2364 2365 err_out: 2366 /* free old allocs without cleaning intr */ 2367 for (i = 0; i < qparam->nxqs; i++) { 2368 if (tx_qcqs && tx_qcqs[i]) { 2369 tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 2370 ionic_qcq_free(lif, tx_qcqs[i]); 2371 devm_kfree(lif->ionic->dev, tx_qcqs[i]); 2372 tx_qcqs[i] = NULL; 2373 } 2374 if (rx_qcqs && rx_qcqs[i]) { 2375 rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 2376 ionic_qcq_free(lif, rx_qcqs[i]); 2377 devm_kfree(lif->ionic->dev, rx_qcqs[i]); 2378 rx_qcqs[i] = NULL; 2379 } 2380 } 2381 2382 /* free q array */ 2383 if (rx_qcqs) { 2384 devm_kfree(lif->ionic->dev, rx_qcqs); 2385 rx_qcqs = NULL; 2386 } 2387 if (tx_qcqs) { 2388 devm_kfree(lif->ionic->dev, tx_qcqs); 2389 tx_qcqs = NULL; 2390 } 2391 2392 /* clean the unused dma and info allocations when new set is smaller 2393 * than the full array, but leave the qcq shells in place 2394 */ 2395 for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) { 2396 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 2397 ionic_qcq_free(lif, lif->txqcqs[i]); 2398 2399 lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; 2400 ionic_qcq_free(lif, lif->rxqcqs[i]); 2401 } 2402 2403 return err; 2404 } 2405 2406 int ionic_lif_alloc(struct ionic *ionic) 2407 { 2408 struct device *dev = ionic->dev; 2409 union ionic_lif_identity *lid; 2410 struct net_device *netdev; 2411 struct ionic_lif *lif; 2412 int tbl_sz; 2413 int err; 2414 2415 lid = kzalloc(sizeof(*lid), GFP_KERNEL); 2416 if (!lid) 2417 return -ENOMEM; 2418 2419 netdev = alloc_etherdev_mqs(sizeof(*lif), 2420 ionic->ntxqs_per_lif, ionic->ntxqs_per_lif); 2421 if (!netdev) { 2422 dev_err(dev, "Cannot allocate netdev, aborting\n"); 2423 err = -ENOMEM; 2424 goto err_out_free_lid; 2425 } 2426 2427 SET_NETDEV_DEV(netdev, dev); 2428 2429 lif = netdev_priv(netdev); 2430 lif->netdev = netdev; 2431 ionic->lif = lif; 2432 netdev->netdev_ops = &ionic_netdev_ops; 2433 ionic_ethtool_set_ops(netdev); 2434 2435 netdev->watchdog_timeo = 2 * HZ; 2436 netif_carrier_off(netdev); 2437 2438 lif->identity = lid; 2439 lif->lif_type = IONIC_LIF_TYPE_CLASSIC; 2440 err = ionic_lif_identify(ionic, lif->lif_type, lif->identity); 2441 if (err) { 2442 dev_err(ionic->dev, "Cannot identify type %d: %d\n", 2443 lif->lif_type, err); 2444 goto err_out_free_netdev; 2445 } 2446 lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU, 2447 le32_to_cpu(lif->identity->eth.min_frame_size)); 2448 lif->netdev->max_mtu = 2449 le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN; 2450 2451 lif->neqs = ionic->neqs_per_lif; 2452 lif->nxqs = ionic->ntxqs_per_lif; 2453 2454 lif->ionic = ionic; 2455 lif->index = 0; 2456 lif->ntxq_descs = IONIC_DEF_TXRX_DESC; 2457 lif->nrxq_descs = IONIC_DEF_TXRX_DESC; 2458 2459 /* Convert the default coalesce value to actual hw resolution */ 2460 lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; 2461 lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic, 2462 lif->rx_coalesce_usecs); 2463 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; 2464 lif->tx_coalesce_hw = lif->rx_coalesce_hw; 2465 set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state); 2466 set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state); 2467 2468 snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index); 2469 2470 spin_lock_init(&lif->adminq_lock); 2471 2472 spin_lock_init(&lif->deferred.lock); 2473 INIT_LIST_HEAD(&lif->deferred.list); 2474 INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work); 2475 2476 /* allocate lif info */ 2477 lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE); 2478 lif->info = dma_alloc_coherent(dev, lif->info_sz, 2479 &lif->info_pa, GFP_KERNEL); 2480 if (!lif->info) { 2481 dev_err(dev, "Failed to allocate lif info, aborting\n"); 2482 err = -ENOMEM; 2483 goto err_out_free_netdev; 2484 } 2485 2486 ionic_debugfs_add_lif(lif); 2487 2488 /* allocate control queues and txrx queue arrays */ 2489 ionic_lif_queue_identify(lif); 2490 err = ionic_qcqs_alloc(lif); 2491 if (err) 2492 goto err_out_free_lif_info; 2493 2494 /* allocate rss indirection table */ 2495 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); 2496 lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz; 2497 lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz, 2498 &lif->rss_ind_tbl_pa, 2499 GFP_KERNEL); 2500 2501 if (!lif->rss_ind_tbl) { 2502 err = -ENOMEM; 2503 dev_err(dev, "Failed to allocate rss indirection table, aborting\n"); 2504 goto err_out_free_qcqs; 2505 } 2506 netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); 2507 2508 return 0; 2509 2510 err_out_free_qcqs: 2511 ionic_qcqs_free(lif); 2512 err_out_free_lif_info: 2513 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 2514 lif->info = NULL; 2515 lif->info_pa = 0; 2516 err_out_free_netdev: 2517 free_netdev(lif->netdev); 2518 lif = NULL; 2519 err_out_free_lid: 2520 kfree(lid); 2521 2522 return err; 2523 } 2524 2525 static void ionic_lif_reset(struct ionic_lif *lif) 2526 { 2527 struct ionic_dev *idev = &lif->ionic->idev; 2528 2529 mutex_lock(&lif->ionic->dev_cmd_lock); 2530 ionic_dev_cmd_lif_reset(idev, lif->index); 2531 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 2532 mutex_unlock(&lif->ionic->dev_cmd_lock); 2533 } 2534 2535 static void ionic_lif_handle_fw_down(struct ionic_lif *lif) 2536 { 2537 struct ionic *ionic = lif->ionic; 2538 2539 if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2540 return; 2541 2542 dev_info(ionic->dev, "FW Down: Stopping LIFs\n"); 2543 2544 netif_device_detach(lif->netdev); 2545 2546 if (test_bit(IONIC_LIF_F_UP, lif->state)) { 2547 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); 2548 mutex_lock(&lif->queue_lock); 2549 ionic_stop_queues(lif); 2550 mutex_unlock(&lif->queue_lock); 2551 } 2552 2553 if (netif_running(lif->netdev)) { 2554 ionic_txrx_deinit(lif); 2555 ionic_txrx_free(lif); 2556 } 2557 ionic_lif_deinit(lif); 2558 ionic_reset(ionic); 2559 ionic_qcqs_free(lif); 2560 2561 dev_info(ionic->dev, "FW Down: LIFs stopped\n"); 2562 } 2563 2564 static void ionic_lif_handle_fw_up(struct ionic_lif *lif) 2565 { 2566 struct ionic *ionic = lif->ionic; 2567 int err; 2568 2569 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2570 return; 2571 2572 dev_info(ionic->dev, "FW Up: restarting LIFs\n"); 2573 2574 ionic_init_devinfo(ionic); 2575 err = ionic_identify(ionic); 2576 if (err) 2577 goto err_out; 2578 err = ionic_port_identify(ionic); 2579 if (err) 2580 goto err_out; 2581 err = ionic_port_init(ionic); 2582 if (err) 2583 goto err_out; 2584 err = ionic_qcqs_alloc(lif); 2585 if (err) 2586 goto err_out; 2587 2588 err = ionic_lif_init(lif); 2589 if (err) 2590 goto err_qcqs_free; 2591 2592 if (lif->registered) 2593 ionic_lif_set_netdev_info(lif); 2594 2595 ionic_rx_filter_replay(lif); 2596 2597 if (netif_running(lif->netdev)) { 2598 err = ionic_txrx_alloc(lif); 2599 if (err) 2600 goto err_lifs_deinit; 2601 2602 err = ionic_txrx_init(lif); 2603 if (err) 2604 goto err_txrx_free; 2605 } 2606 2607 clear_bit(IONIC_LIF_F_FW_RESET, lif->state); 2608 ionic_link_status_check_request(lif, true); 2609 netif_device_attach(lif->netdev); 2610 dev_info(ionic->dev, "FW Up: LIFs restarted\n"); 2611 2612 return; 2613 2614 err_txrx_free: 2615 ionic_txrx_free(lif); 2616 err_lifs_deinit: 2617 ionic_lif_deinit(lif); 2618 err_qcqs_free: 2619 ionic_qcqs_free(lif); 2620 err_out: 2621 dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err); 2622 } 2623 2624 void ionic_lif_free(struct ionic_lif *lif) 2625 { 2626 struct device *dev = lif->ionic->dev; 2627 2628 /* free rss indirection table */ 2629 dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl, 2630 lif->rss_ind_tbl_pa); 2631 lif->rss_ind_tbl = NULL; 2632 lif->rss_ind_tbl_pa = 0; 2633 2634 /* free queues */ 2635 ionic_qcqs_free(lif); 2636 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) 2637 ionic_lif_reset(lif); 2638 2639 /* free lif info */ 2640 kfree(lif->identity); 2641 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); 2642 lif->info = NULL; 2643 lif->info_pa = 0; 2644 2645 /* unmap doorbell page */ 2646 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 2647 lif->kern_dbpage = NULL; 2648 kfree(lif->dbid_inuse); 2649 lif->dbid_inuse = NULL; 2650 2651 /* free netdev & lif */ 2652 ionic_debugfs_del_lif(lif); 2653 free_netdev(lif->netdev); 2654 } 2655 2656 void ionic_lif_deinit(struct ionic_lif *lif) 2657 { 2658 if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state)) 2659 return; 2660 2661 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { 2662 cancel_work_sync(&lif->deferred.work); 2663 cancel_work_sync(&lif->tx_timeout_work); 2664 ionic_rx_filters_deinit(lif); 2665 if (lif->netdev->features & NETIF_F_RXHASH) 2666 ionic_lif_rss_deinit(lif); 2667 } 2668 2669 napi_disable(&lif->adminqcq->napi); 2670 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 2671 ionic_lif_qcq_deinit(lif, lif->adminqcq); 2672 2673 mutex_destroy(&lif->queue_lock); 2674 ionic_lif_reset(lif); 2675 } 2676 2677 static int ionic_lif_adminq_init(struct ionic_lif *lif) 2678 { 2679 struct device *dev = lif->ionic->dev; 2680 struct ionic_q_init_comp comp; 2681 struct ionic_dev *idev; 2682 struct ionic_qcq *qcq; 2683 struct ionic_queue *q; 2684 int err; 2685 2686 idev = &lif->ionic->idev; 2687 qcq = lif->adminqcq; 2688 q = &qcq->q; 2689 2690 mutex_lock(&lif->ionic->dev_cmd_lock); 2691 ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index); 2692 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 2693 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 2694 mutex_unlock(&lif->ionic->dev_cmd_lock); 2695 if (err) { 2696 netdev_err(lif->netdev, "adminq init failed %d\n", err); 2697 return err; 2698 } 2699 2700 q->hw_type = comp.hw_type; 2701 q->hw_index = le32_to_cpu(comp.hw_index); 2702 q->dbval = IONIC_DBELL_QID(q->hw_index); 2703 2704 dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type); 2705 dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index); 2706 2707 netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi, 2708 NAPI_POLL_WEIGHT); 2709 2710 napi_enable(&qcq->napi); 2711 2712 if (qcq->flags & IONIC_QCQ_F_INTR) 2713 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 2714 IONIC_INTR_MASK_CLEAR); 2715 2716 qcq->flags |= IONIC_QCQ_F_INITED; 2717 2718 return 0; 2719 } 2720 2721 static int ionic_lif_notifyq_init(struct ionic_lif *lif) 2722 { 2723 struct ionic_qcq *qcq = lif->notifyqcq; 2724 struct device *dev = lif->ionic->dev; 2725 struct ionic_queue *q = &qcq->q; 2726 int err; 2727 2728 struct ionic_admin_ctx ctx = { 2729 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 2730 .cmd.q_init = { 2731 .opcode = IONIC_CMD_Q_INIT, 2732 .lif_index = cpu_to_le16(lif->index), 2733 .type = q->type, 2734 .ver = lif->qtype_info[q->type].version, 2735 .index = cpu_to_le32(q->index), 2736 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | 2737 IONIC_QINIT_F_ENA), 2738 .intr_index = cpu_to_le16(lif->adminqcq->intr.index), 2739 .pid = cpu_to_le16(q->pid), 2740 .ring_size = ilog2(q->num_descs), 2741 .ring_base = cpu_to_le64(q->base_pa), 2742 } 2743 }; 2744 2745 dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid); 2746 dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index); 2747 dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); 2748 dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); 2749 2750 err = ionic_adminq_post_wait(lif, &ctx); 2751 if (err) 2752 return err; 2753 2754 lif->last_eid = 0; 2755 q->hw_type = ctx.comp.q_init.hw_type; 2756 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); 2757 q->dbval = IONIC_DBELL_QID(q->hw_index); 2758 2759 dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type); 2760 dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index); 2761 2762 /* preset the callback info */ 2763 q->info[0].cb_arg = lif; 2764 2765 qcq->flags |= IONIC_QCQ_F_INITED; 2766 2767 return 0; 2768 } 2769 2770 static int ionic_station_set(struct ionic_lif *lif) 2771 { 2772 struct net_device *netdev = lif->netdev; 2773 struct ionic_admin_ctx ctx = { 2774 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 2775 .cmd.lif_getattr = { 2776 .opcode = IONIC_CMD_LIF_GETATTR, 2777 .index = cpu_to_le16(lif->index), 2778 .attr = IONIC_LIF_ATTR_MAC, 2779 }, 2780 }; 2781 struct sockaddr addr; 2782 int err; 2783 2784 err = ionic_adminq_post_wait(lif, &ctx); 2785 if (err) 2786 return err; 2787 netdev_dbg(lif->netdev, "found initial MAC addr %pM\n", 2788 ctx.comp.lif_getattr.mac); 2789 if (is_zero_ether_addr(ctx.comp.lif_getattr.mac)) 2790 return 0; 2791 2792 if (!is_zero_ether_addr(netdev->dev_addr)) { 2793 /* If the netdev mac is non-zero and doesn't match the default 2794 * device address, it was set by something earlier and we're 2795 * likely here again after a fw-upgrade reset. We need to be 2796 * sure the netdev mac is in our filter list. 2797 */ 2798 if (!ether_addr_equal(ctx.comp.lif_getattr.mac, 2799 netdev->dev_addr)) 2800 ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP); 2801 } else { 2802 /* Update the netdev mac with the device's mac */ 2803 memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len); 2804 addr.sa_family = AF_INET; 2805 err = eth_prepare_mac_addr_change(netdev, &addr); 2806 if (err) { 2807 netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n", 2808 addr.sa_data, err); 2809 return 0; 2810 } 2811 2812 eth_commit_mac_addr_change(netdev, &addr); 2813 } 2814 2815 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", 2816 netdev->dev_addr); 2817 ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP); 2818 2819 return 0; 2820 } 2821 2822 int ionic_lif_init(struct ionic_lif *lif) 2823 { 2824 struct ionic_dev *idev = &lif->ionic->idev; 2825 struct device *dev = lif->ionic->dev; 2826 struct ionic_lif_init_comp comp; 2827 int dbpage_num; 2828 int err; 2829 2830 mutex_lock(&lif->ionic->dev_cmd_lock); 2831 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa); 2832 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); 2833 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); 2834 mutex_unlock(&lif->ionic->dev_cmd_lock); 2835 if (err) 2836 return err; 2837 2838 lif->hw_index = le16_to_cpu(comp.hw_index); 2839 mutex_init(&lif->queue_lock); 2840 2841 /* now that we have the hw_index we can figure out our doorbell page */ 2842 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); 2843 if (!lif->dbid_count) { 2844 dev_err(dev, "No doorbell pages, aborting\n"); 2845 return -EINVAL; 2846 } 2847 2848 lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL); 2849 if (!lif->dbid_inuse) { 2850 dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n"); 2851 return -ENOMEM; 2852 } 2853 2854 /* first doorbell id reserved for kernel (dbid aka pid == zero) */ 2855 set_bit(0, lif->dbid_inuse); 2856 lif->kern_pid = 0; 2857 2858 dbpage_num = ionic_db_page_num(lif, lif->kern_pid); 2859 lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num); 2860 if (!lif->kern_dbpage) { 2861 dev_err(dev, "Cannot map dbpage, aborting\n"); 2862 err = -ENOMEM; 2863 goto err_out_free_dbid; 2864 } 2865 2866 err = ionic_lif_adminq_init(lif); 2867 if (err) 2868 goto err_out_adminq_deinit; 2869 2870 if (lif->ionic->nnqs_per_lif) { 2871 err = ionic_lif_notifyq_init(lif); 2872 if (err) 2873 goto err_out_notifyq_deinit; 2874 } 2875 2876 err = ionic_init_nic_features(lif); 2877 if (err) 2878 goto err_out_notifyq_deinit; 2879 2880 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { 2881 err = ionic_rx_filters_init(lif); 2882 if (err) 2883 goto err_out_notifyq_deinit; 2884 } 2885 2886 err = ionic_station_set(lif); 2887 if (err) 2888 goto err_out_notifyq_deinit; 2889 2890 lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT; 2891 2892 set_bit(IONIC_LIF_F_INITED, lif->state); 2893 2894 INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work); 2895 2896 return 0; 2897 2898 err_out_notifyq_deinit: 2899 ionic_lif_qcq_deinit(lif, lif->notifyqcq); 2900 err_out_adminq_deinit: 2901 ionic_lif_qcq_deinit(lif, lif->adminqcq); 2902 ionic_lif_reset(lif); 2903 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); 2904 lif->kern_dbpage = NULL; 2905 err_out_free_dbid: 2906 kfree(lif->dbid_inuse); 2907 lif->dbid_inuse = NULL; 2908 2909 return err; 2910 } 2911 2912 static void ionic_lif_notify_work(struct work_struct *ws) 2913 { 2914 } 2915 2916 static void ionic_lif_set_netdev_info(struct ionic_lif *lif) 2917 { 2918 struct ionic_admin_ctx ctx = { 2919 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), 2920 .cmd.lif_setattr = { 2921 .opcode = IONIC_CMD_LIF_SETATTR, 2922 .index = cpu_to_le16(lif->index), 2923 .attr = IONIC_LIF_ATTR_NAME, 2924 }, 2925 }; 2926 2927 strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name, 2928 sizeof(ctx.cmd.lif_setattr.name)); 2929 2930 ionic_adminq_post_wait(lif, &ctx); 2931 } 2932 2933 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev) 2934 { 2935 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit) 2936 return NULL; 2937 2938 return netdev_priv(netdev); 2939 } 2940 2941 static int ionic_lif_notify(struct notifier_block *nb, 2942 unsigned long event, void *info) 2943 { 2944 struct net_device *ndev = netdev_notifier_info_to_dev(info); 2945 struct ionic *ionic = container_of(nb, struct ionic, nb); 2946 struct ionic_lif *lif = ionic_netdev_lif(ndev); 2947 2948 if (!lif || lif->ionic != ionic) 2949 return NOTIFY_DONE; 2950 2951 switch (event) { 2952 case NETDEV_CHANGENAME: 2953 ionic_lif_set_netdev_info(lif); 2954 break; 2955 } 2956 2957 return NOTIFY_DONE; 2958 } 2959 2960 int ionic_lif_register(struct ionic_lif *lif) 2961 { 2962 int err; 2963 2964 INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work); 2965 2966 lif->ionic->nb.notifier_call = ionic_lif_notify; 2967 2968 err = register_netdevice_notifier(&lif->ionic->nb); 2969 if (err) 2970 lif->ionic->nb.notifier_call = NULL; 2971 2972 /* only register LIF0 for now */ 2973 err = register_netdev(lif->netdev); 2974 if (err) { 2975 dev_err(lif->ionic->dev, "Cannot register net device, aborting\n"); 2976 return err; 2977 } 2978 2979 ionic_link_status_check_request(lif, true); 2980 lif->registered = true; 2981 ionic_lif_set_netdev_info(lif); 2982 2983 return 0; 2984 } 2985 2986 void ionic_lif_unregister(struct ionic_lif *lif) 2987 { 2988 if (lif->ionic->nb.notifier_call) { 2989 unregister_netdevice_notifier(&lif->ionic->nb); 2990 cancel_work_sync(&lif->ionic->nb_work); 2991 lif->ionic->nb.notifier_call = NULL; 2992 } 2993 2994 if (lif->netdev->reg_state == NETREG_REGISTERED) 2995 unregister_netdev(lif->netdev); 2996 lif->registered = false; 2997 } 2998 2999 static void ionic_lif_queue_identify(struct ionic_lif *lif) 3000 { 3001 union ionic_q_identity __iomem *q_ident; 3002 struct ionic *ionic = lif->ionic; 3003 struct ionic_dev *idev; 3004 int qtype; 3005 int err; 3006 3007 idev = &lif->ionic->idev; 3008 q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data; 3009 3010 for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) { 3011 struct ionic_qtype_info *qti = &lif->qtype_info[qtype]; 3012 3013 /* filter out the ones we know about */ 3014 switch (qtype) { 3015 case IONIC_QTYPE_ADMINQ: 3016 case IONIC_QTYPE_NOTIFYQ: 3017 case IONIC_QTYPE_RXQ: 3018 case IONIC_QTYPE_TXQ: 3019 break; 3020 default: 3021 continue; 3022 } 3023 3024 memset(qti, 0, sizeof(*qti)); 3025 3026 mutex_lock(&ionic->dev_cmd_lock); 3027 ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype, 3028 ionic_qtype_versions[qtype]); 3029 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 3030 if (!err) { 3031 qti->version = readb(&q_ident->version); 3032 qti->supported = readb(&q_ident->supported); 3033 qti->features = readq(&q_ident->features); 3034 qti->desc_sz = readw(&q_ident->desc_sz); 3035 qti->comp_sz = readw(&q_ident->comp_sz); 3036 qti->sg_desc_sz = readw(&q_ident->sg_desc_sz); 3037 qti->max_sg_elems = readw(&q_ident->max_sg_elems); 3038 qti->sg_desc_stride = readw(&q_ident->sg_desc_stride); 3039 } 3040 mutex_unlock(&ionic->dev_cmd_lock); 3041 3042 if (err == -EINVAL) { 3043 dev_err(ionic->dev, "qtype %d not supported\n", qtype); 3044 continue; 3045 } else if (err == -EIO) { 3046 dev_err(ionic->dev, "q_ident failed, not supported on older FW\n"); 3047 return; 3048 } else if (err) { 3049 dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n", 3050 qtype, err); 3051 return; 3052 } 3053 3054 dev_dbg(ionic->dev, " qtype[%d].version = %d\n", 3055 qtype, qti->version); 3056 dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n", 3057 qtype, qti->supported); 3058 dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n", 3059 qtype, qti->features); 3060 dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n", 3061 qtype, qti->desc_sz); 3062 dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n", 3063 qtype, qti->comp_sz); 3064 dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n", 3065 qtype, qti->sg_desc_sz); 3066 dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n", 3067 qtype, qti->max_sg_elems); 3068 dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n", 3069 qtype, qti->sg_desc_stride); 3070 } 3071 } 3072 3073 int ionic_lif_identify(struct ionic *ionic, u8 lif_type, 3074 union ionic_lif_identity *lid) 3075 { 3076 struct ionic_dev *idev = &ionic->idev; 3077 size_t sz; 3078 int err; 3079 3080 sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data)); 3081 3082 mutex_lock(&ionic->dev_cmd_lock); 3083 ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1); 3084 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); 3085 memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz); 3086 mutex_unlock(&ionic->dev_cmd_lock); 3087 if (err) 3088 return (err); 3089 3090 dev_dbg(ionic->dev, "capabilities 0x%llx\n", 3091 le64_to_cpu(lid->capabilities)); 3092 3093 dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n", 3094 le32_to_cpu(lid->eth.max_ucast_filters)); 3095 dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n", 3096 le32_to_cpu(lid->eth.max_mcast_filters)); 3097 dev_dbg(ionic->dev, "eth.features 0x%llx\n", 3098 le64_to_cpu(lid->eth.config.features)); 3099 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n", 3100 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ])); 3101 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n", 3102 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ])); 3103 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n", 3104 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ])); 3105 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n", 3106 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ])); 3107 dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name); 3108 dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac); 3109 dev_dbg(ionic->dev, "eth.config.mtu %d\n", 3110 le32_to_cpu(lid->eth.config.mtu)); 3111 3112 return 0; 3113 } 3114 3115 int ionic_lif_size(struct ionic *ionic) 3116 { 3117 struct ionic_identity *ident = &ionic->ident; 3118 unsigned int nintrs, dev_nintrs; 3119 union ionic_lif_config *lc; 3120 unsigned int ntxqs_per_lif; 3121 unsigned int nrxqs_per_lif; 3122 unsigned int neqs_per_lif; 3123 unsigned int nnqs_per_lif; 3124 unsigned int nxqs, neqs; 3125 unsigned int min_intrs; 3126 int err; 3127 3128 lc = &ident->lif.eth.config; 3129 dev_nintrs = le32_to_cpu(ident->dev.nintrs); 3130 neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count); 3131 nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]); 3132 ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]); 3133 nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]); 3134 3135 nxqs = min(ntxqs_per_lif, nrxqs_per_lif); 3136 nxqs = min(nxqs, num_online_cpus()); 3137 neqs = min(neqs_per_lif, num_online_cpus()); 3138 3139 try_again: 3140 /* interrupt usage: 3141 * 1 for master lif adminq/notifyq 3142 * 1 for each CPU for master lif TxRx queue pairs 3143 * whatever's left is for RDMA queues 3144 */ 3145 nintrs = 1 + nxqs + neqs; 3146 min_intrs = 2; /* adminq + 1 TxRx queue pair */ 3147 3148 if (nintrs > dev_nintrs) 3149 goto try_fewer; 3150 3151 err = ionic_bus_alloc_irq_vectors(ionic, nintrs); 3152 if (err < 0 && err != -ENOSPC) { 3153 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err); 3154 return err; 3155 } 3156 if (err == -ENOSPC) 3157 goto try_fewer; 3158 3159 if (err != nintrs) { 3160 ionic_bus_free_irq_vectors(ionic); 3161 goto try_fewer; 3162 } 3163 3164 ionic->nnqs_per_lif = nnqs_per_lif; 3165 ionic->neqs_per_lif = neqs; 3166 ionic->ntxqs_per_lif = nxqs; 3167 ionic->nrxqs_per_lif = nxqs; 3168 ionic->nintrs = nintrs; 3169 3170 ionic_debugfs_add_sizes(ionic); 3171 3172 return 0; 3173 3174 try_fewer: 3175 if (nnqs_per_lif > 1) { 3176 nnqs_per_lif >>= 1; 3177 goto try_again; 3178 } 3179 if (neqs > 1) { 3180 neqs >>= 1; 3181 goto try_again; 3182 } 3183 if (nxqs > 1) { 3184 nxqs >>= 1; 3185 goto try_again; 3186 } 3187 dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs); 3188 return -ENOSPC; 3189 } 3190