// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ethtool.h>
#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
#include <linux/vmalloc.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

/* queuetype support level */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,	/* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,	/* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 2,	/* 0 = Base version with CQ+SG support
					 * 2 =   ... with CMB rings
					 */
	[IONIC_QTYPE_TXQ]     = 3,	/* 0 = Base version with CQ+SG support
					 * 1 =   ... with Tx SG version 1
					 * 3 =   ... with CMB rings
					 */
};

static void ionic_link_status_check(struct ionic_lif *lif);
static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);

static void ionic_txrx_deinit(struct ionic_lif *lif);
static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);

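/* Dynamic interrupt moderation (DIM) worker: take the rx moderation
 * profile suggested by net_dim, convert its usec target into a hardware
 * coalesce value, and reprogram the interrupt control block only if the
 * value actually changed.
 */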
static void ionic_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct ionic_intr_info *intr;
	struct dim_cq_moder cur_moder;
	struct ionic_qcq *qcq;
	struct ionic_lif *lif;
	u32 new_coal;

	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	qcq = container_of(dim, struct ionic_qcq, dim);
	lif = qcq->q.lif;
	new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
	new_coal = new_coal ? new_coal : 1;

	intr = &qcq->intr;
	if (intr->dim_coal_hw != new_coal) {
		intr->dim_coal_hw = new_coal;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     intr->index, intr->dim_coal_hw);
	}

	dim->state = DIM_START_MEASURE;
}

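/* Drain the lif's deferred work list: pop one entry at a time under the
 * lock, then run its handler with the lock dropped so handlers are free
 * to sleep or take other locks.
 */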
static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	do {
		spin_lock_bh(&def->lock);
		if (!list_empty(&def->list)) {
			w = list_first_entry(&def->list,
					     struct ionic_deferred_work, list);
			list_del(&w->list);
		}
		spin_unlock_bh(&def->lock);

		if (!w)
			break;

		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			if (w->fw_status) {
				ionic_lif_handle_fw_up(lif);
			} else {
				ionic_lif_handle_fw_down(lif);

				/* Fire off another watchdog to see
				 * if the FW is already back rather than
				 * waiting another whole cycle
				 */
				mod_timer(&lif->ionic->watchdog_timer, jiffies + 1);
			}
			break;
		default:
			break;
		}
		kfree(w);
		w = NULL;
	} while (true);
}

void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
				struct ionic_deferred_work *work)
{
	spin_lock_bh(&def->lock);
	list_add_tail(&work->list, &def->list);
	spin_unlock_bh(&def->lock);
	schedule_work(&def->work);
}

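/* Reconcile carrier state with the latest firmware-reported link status:
 * start the queues and set carrier on link-up, stop them and drop carrier
 * on link-down.  Runs only when a check has been requested and the lif is
 * not marked broken.
 */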
static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	/* Don't put carrier back up if we're in a broken state */
	if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
		clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
		return;
	}

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		int err = 0;

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			err = ionic_start_queues(lif);
			if (err && err != -EBUSY) {
				netdev_err(netdev,
					   "Failed to start queues: %d\n", err);
				set_bit(IONIC_LIF_F_BROKEN, lif->state);
				netif_carrier_off(lif->netdev);
			}
			mutex_unlock(&lif->queue_lock);
		}

		if (!err && !netif_carrier_ok(netdev)) {
			ionic_port_identify(lif->ionic);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    le32_to_cpu(lif->info->status.link_speed) / 1000);
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			lif->link_down_count++;
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (netdev->flags & IFF_UP && netif_running(netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}

void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
			return;
		}

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_link_status_check(lif);
	}
}

static void ionic_napi_deadline(struct timer_list *timer)
{
	struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);

	napi_schedule(&qcq->napi);
}

static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = lif->netdev->name;
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs) {
		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
			    __func__, index, ionic->nintrs);
		return -ENOSPC;
	}

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	return 0;
}

static void ionic_intr_free(struct ionic *ionic, int index)
{
	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
		clear_bit(index, ionic->intrs);
}

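/* Post a Q_CONTROL enable command to the firmware, then unmask the
 * interrupt and enable napi if this qcq owns one.  Any stale interrupt
 * state is cleaned before the enable is posted.
 */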
static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};
	int ret;

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);

	ret = ionic_adminq_post_wait(lif, &ctx);
	if (ret)
		return ret;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		napi_enable(&qcq->napi);
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return 0;
}

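/* Quiesce a queue: mask and synchronize its interrupt, stop napi and the
 * napi deadline timer, then post a disable command to the firmware unless
 * a prior firmware error makes that pointless.
 */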
static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
{
	struct ionic_queue *q;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.oper = IONIC_Q_DISABLE,
		},
	};

	if (!qcq) {
		netdev_err(lif->netdev, "%s: bad qcq\n", __func__);
		return -ENXIO;
	}

	q = &qcq->q;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		struct ionic_dev *idev = &lif->ionic->idev;

		cancel_work_sync(&qcq->dim.work);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
		del_timer_sync(&qcq->napi_deadline);
	}

	/* If there was a previous fw communication error, don't bother with
	 * sending the adminq command and just return the same error value.
	 */
	if (fw_err == -ETIMEDOUT || fw_err == -ENXIO)
		return fw_err;

	ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
	ctx.cmd.q_control.type = q->type;
	ctx.cmd.q_control.index = cpu_to_le32(q->index);
	dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	if (!qcq)
		return;

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
		return;

	irq_set_affinity_hint(qcq->intr.vector, NULL);
	devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
	qcq->intr.vector = 0;
	ionic_intr_free(lif->ionic, qcq->intr.index);
	qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}

static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (qcq->q_base) {
		dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
		qcq->q_base = NULL;
		qcq->q_base_pa = 0;
	}

	if (qcq->cmb_q_base) {
		iounmap(qcq->cmb_q_base);
		ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order);
		qcq->cmb_pgid = 0;
		qcq->cmb_order = 0;
		qcq->cmb_q_base = NULL;
		qcq->cmb_q_base_pa = 0;
	}

	if (qcq->cq_base) {
		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
		qcq->cq_base = NULL;
		qcq->cq_base_pa = 0;
	}

	if (qcq->sg_base) {
		dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
		qcq->sg_base = NULL;
		qcq->sg_base_pa = 0;
	}

	ionic_qcq_intr_free(lif, qcq);

	if (qcq->cq.info) {
		vfree(qcq->cq.info);
		qcq->cq.info = NULL;
	}
	if (qcq->q.info) {
		vfree(qcq->q.info);
		qcq->q.info = NULL;
	}
}

void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_qcq *adminqcq;
	unsigned long irqflags;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		devm_kfree(dev, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		spin_lock_irqsave(&lif->adminq_lock, irqflags);
		adminqcq = READ_ONCE(lif->adminqcq);
		lif->adminqcq = NULL;
		spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
		if (adminqcq) {
			ionic_qcq_free(lif, adminqcq);
			devm_kfree(dev, adminqcq);
		}
	}

	if (lif->rxqcqs) {
		devm_kfree(dev, lif->rxqstats);
		lif->rxqstats = NULL;
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		devm_kfree(dev, lif->txqstats);
		lif->txqstats = NULL;
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}

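/* Make one qcq share another's interrupt: used to let the notifyq and the
 * hwstamp queues ride on the adminq's vector instead of consuming their
 * own.
 */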
static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
	n_qcq->napi_qcq = src_qcq->napi_qcq;
}

static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	int err;

	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	err = ionic_intr_alloc(lif, &qcq->intr);
	if (err) {
		netdev_warn(lif->netdev, "no intr for %s: %d\n",
			    qcq->q.name, err);
		goto err_out;
	}

	err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
	if (err < 0) {
		netdev_warn(lif->netdev, "no vector for %s: %d\n",
			    qcq->q.name, err);
		goto err_out_free_intr;
	}
	qcq->intr.vector = err;
	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
			       IONIC_INTR_MASK_SET);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "irq request failed %d\n", err);
		goto err_out_free_intr;
	}

	/* try to get the irq on the local numa node first */
	qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
					     dev_to_node(lif->ionic->dev));
	if (qcq->intr.cpu != -1)
		cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);

	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
	return 0;

err_out_free_intr:
	ionic_intr_free(lif->ionic, qcq->intr.index);
err_out:
	return err;
}

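/* Allocate a qcq: the queue/cq bookkeeping, an optional interrupt, and
 * the descriptor rings.  NotifyQ gets its q and cq in one contiguous DMA
 * block; other queue types get separate q and cq allocations, with the q
 * ring optionally placed in controller memory (CMB) and an optional SG
 * ring alongside.
 */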
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int pid, struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	void *q_base, *cq_base, *sg_base;
	dma_addr_t cq_base_pa = 0;
	dma_addr_t sg_base_pa = 0;
	dma_addr_t q_base_pa = 0;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->q.dev = dev;
	new->flags = flags;

	new->q.info = vcalloc(num_descs, sizeof(*new->q.info));
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out_free_qcq;
	}

	new->q.type = type;
	new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out_free_q_info;
	}

	err = ionic_alloc_qcq_interrupt(lif, new);
	if (err)
		goto err_out_free_q_info;

	new->cq.info = vcalloc(num_descs, sizeof(*new->cq.info));
	if (!new->cq.info) {
		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_cq_info;
	}

	if (flags & IONIC_QCQ_F_NOTIFYQ) {
		int q_size;

		/* q & cq need to be contiguous in NotifyQ, so alloc it all in q
		 * and don't alloc cq.  We leave new->cq_size and new->cq_base
		 * as 0 to be sure we don't try to free it later.
		 */
		q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
		new->q_size = PAGE_SIZE + q_size +
			      ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
		new->q_base = dma_alloc_coherent(dev, new->q_size,
						 &new->q_base_pa, GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
		cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	} else {
		/* regular DMA q descriptors */
		new->q_size = PAGE_SIZE + (num_descs * desc_size);
		new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
						 GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		if (flags & IONIC_QCQ_F_CMB_RINGS) {
			/* on-chip CMB q descriptors */
			new->cmb_q_size = num_descs * desc_size;
			new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);

			err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
					    new->cmb_order);
			if (err) {
				netdev_err(lif->netdev,
					   "Cannot allocate queue order %d from cmb: err %d\n",
					   new->cmb_order, err);
				goto err_out_free_q;
			}

			new->cmb_q_base = ioremap_wc(new->cmb_q_base_pa, new->cmb_q_size);
			if (!new->cmb_q_base) {
				netdev_err(lif->netdev, "Cannot map queue from cmb\n");
				ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
				err = -ENOMEM;
				goto err_out_free_q;
			}

			new->cmb_q_base_pa -= idev->phy_cmb_pages;
			ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa);
		}

		/* cq DMA descriptors */
		new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
		new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
						  GFP_KERNEL);
		if (!new->cq_base) {
			netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_q;
		}
		cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
		cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	}

	if (flags & IONIC_QCQ_F_SG) {
		new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
		new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
						  GFP_KERNEL);
		if (!new->sg_base) {
			netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq;
		}
		sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
		sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	INIT_WORK(&new->dim.work, ionic_dim_work);
	new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;

	*qcq = new;

	return 0;

err_out_free_cq:
	dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
	if (new->cmb_q_base) {
		iounmap(new->cmb_q_base);
		ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
	}
	dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
	vfree(new->cq.info);
err_out_free_irq:
	if (flags & IONIC_QCQ_F_INTR) {
		devm_free_irq(dev, new->intr.vector, &new->napi);
		ionic_intr_free(lif->ionic, new->intr.index);
	}
err_out_free_q_info:
	vfree(new->q.info);
err_out_free_qcq:
	devm_kfree(dev, new);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int flags;
	int err;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0, lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0, lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	err = -ENOMEM;
	lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				   sizeof(*lif->txqcqs), GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out;
	lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				   sizeof(*lif->rxqcqs), GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out;

	lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
				     sizeof(*lif->txqstats), GFP_KERNEL);
	if (!lif->txqstats)
		goto err_out;
	lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
				     sizeof(*lif->rxqstats), GFP_KERNEL);
	if (!lif->rxqstats)
		goto err_out;

	return 0;

err_out:
	ionic_qcqs_free(lif);
	return err;
}

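/* Reset a qcq's ring state to empty before (re)initializing it with the
 * firmware: indexes back to zero, completion color to 1, and all
 * descriptor memory cleared.
 */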
static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;
	memset(qcq->q_base, 0, qcq->q_size);
	if (qcq->cmb_q_base)
		memset_io(qcq->cmb_q_base, 0, qcq->cmb_q_size);
	memset(qcq->cq_base, 0, qcq->cq_size);
	memset(qcq->sg_base, 0, qcq->sg_size);
}

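/* Post a Q_INIT admin command describing a Tx queue's rings to the
 * firmware, then record the hardware type/index it hands back.  With
 * split Tx/Rx interrupts the txq gets its own napi context here.
 */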
static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(qcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
	}

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base);
	dev_dbg(dev, "txq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
		qcq->napi_qcq = qcq;
		timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
	}

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

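/* Rx counterpart of the txq init above; Rx always gets a napi context,
 * either its own (split interrupts) or one shared with the paired Tx
 * queue.
 */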
static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
			.features = cpu_to_le64(q->features),
		},
	};
	int err;

	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
	}

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
	else
		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);

	qcq->napi_qcq = qcq;
	timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

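/* Create the dedicated PTP hardware-timestamp Tx queue.  It is indexed
 * past the end of the normal txq array, shares the adminq interrupt, and
 * is initialized and enabled immediately if the interface is already
 * running.
 */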
int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int txq_i, flags;
	struct ionic_qcq *txq;
	u64 features;
	int err;

	if (lif->hwstamp_txq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = 2 * sizeof(struct ionic_txq_comp);

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	txq_i = lif->ionic->ntxqs_per_lif;
	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &txq);
	if (err)
		goto err_qcq_alloc;

	txq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, txq);
	ionic_debugfs_add_qcq(lif, txq);

	lif->hwstamp_txq = txq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_txq_init(lif, txq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			err = ionic_qcq_enable(txq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, txq);
err_qcq_init:
	lif->hwstamp_txq = NULL;
	ionic_debugfs_del_qcq(txq);
	ionic_qcq_free(lif, txq);
	devm_kfree(lif->ionic->dev, txq);
err_qcq_alloc:
	return err;
}

int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
{
	unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
	unsigned int rxq_i, flags;
	struct ionic_qcq *rxq;
	u64 features;
	int err;

	if (lif->hwstamp_rxq)
		return 0;

	features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;

	num_desc = IONIC_MIN_TXRX_DESC;
	desc_sz = sizeof(struct ionic_rxq_desc);
	comp_sz = 2 * sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	rxq_i = lif->ionic->nrxqs_per_lif;
	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;

	err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
			      num_desc, desc_sz, comp_sz, sg_desc_sz,
			      lif->kern_pid, &rxq);
	if (err)
		goto err_qcq_alloc;

	rxq->q.features = features;

	ionic_link_qcq_interrupts(lif->adminqcq, rxq);
	ionic_debugfs_add_qcq(lif, rxq);

	lif->hwstamp_rxq = rxq;

	if (netif_running(lif->netdev)) {
		err = ionic_lif_rxq_init(lif, rxq);
		if (err)
			goto err_qcq_init;

		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
			ionic_rx_fill(&rxq->q);
			err = ionic_qcq_enable(rxq);
			if (err)
				goto err_qcq_enable;
		}
	}

	return 0;

err_qcq_enable:
	ionic_lif_qcq_deinit(lif, rxq);
err_qcq_init:
	lif->hwstamp_rxq = NULL;
	ionic_debugfs_del_qcq(rxq);
	ionic_qcq_free(lif, rxq);
	devm_kfree(lif->ionic->dev, rxq);
err_qcq_alloc:
	return err;
}

int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
{
	struct ionic_queue_params qparam;

	ionic_init_queue_params(lif, &qparam);

	if (rx_all)
		qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
	else
		qparam.rxq_features = 0;

	/* if we're not running, just set the values and return */
	if (!netif_running(lif->netdev)) {
		lif->rxq_features = qparam.rxq_features;
		return 0;
	}

	return ionic_reconfigure_queues(lif, &qparam);
}

int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_TXSTAMP,
			.txstamp_mode = cpu_to_le16(txstamp_mode),
		},
	};

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	u32 filter_id;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_rxsteer(lif);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return;
	}

	filter_id = f->filter_id;
	ionic_rx_filter_free(lif, f);

	spin_unlock_bh(&lif->rx_filters.lock);

	netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
}

static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
			.pkt_class = cpu_to_le64(pkt_class),
		},
	};
	u8 qtype;
	u32 qid;
	int err;

	if (!lif->hwstamp_rxq)
		return -EINVAL;

	qtype = lif->hwstamp_rxq->q.type;
	ctx.cmd.rx_filter_add.qtype = qtype;

	qid = lif->hwstamp_rxq->q.index;
	ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);

	netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	spin_lock_bh(&lif->rx_filters.lock);
	err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED);
	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}

int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
{
	ionic_lif_del_hwstamp_rxfilt(lif);

	if (!pkt_class)
		return 0;

	return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
}

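/* Service one NotifyQ event.  Unlike the other queues, NotifyQ newness is
 * detected by event ID (EID) rather than a color bit: a descriptor is new
 * only if its EID is greater than the last EID seen on this lif.
 */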
static bool ionic_notifyq_service(struct ionic_cq *cq,
				  struct ionic_cq_info *cq_info)
{
	union ionic_notifyq_comp *comp = cq_info->cq_desc;
	struct ionic_deferred_work *work;
	struct net_device *netdev;
	struct ionic_queue *q;
	struct ionic_lif *lif;
	u64 eid;

	q = cq->bound_q;
	lif = q->info[0].cb_arg;
	netdev = lif->netdev;
	eid = le64_to_cpu(comp->event.eid);

	/* Have we run out of new completions to process? */
	if ((s64)(eid - lif->last_eid) <= 0)
		return false;

	lif->last_eid = eid;

	dev_dbg(lif->ionic->dev, "notifyq event:\n");
	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	switch (le16_to_cpu(comp->event.ecode)) {
	case IONIC_EVENT_LINK_CHANGE:
		ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
		break;
	case IONIC_EVENT_RESET:
		if (lif->ionic->idev.fw_status_ready &&
		    !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
		    !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
			work = kzalloc(sizeof(*work), GFP_ATOMIC);
			if (!work) {
				netdev_err(lif->netdev, "Reset event dropped\n");
				clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
			} else {
				work->type = IONIC_DW_TYPE_LIF_RESET;
				ionic_lif_deferred_enqueue(&lif->deferred, work);
			}
		}
		break;
	default:
		netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
			    comp->event.ecode, eid);
		break;
	}

	return true;
}

static bool ionic_adminq_service(struct ionic_cq *cq,
				 struct ionic_cq_info *cq_info)
{
	struct ionic_admin_comp *comp = cq_info->cq_desc;

	if (!color_match(comp->color, cq->done_color))
		return false;

	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));

	return true;
}

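/* NAPI handler for the adminq interrupt: services the notifyq and adminq
 * completions, plus the hwstamp tx/rx queues that ride on the same
 * vector, then re-arms interrupt credits and pokes doorbells for any
 * queue that may have missed one.
 */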
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	struct ionic_dev *idev = &lif->ionic->idev;
	unsigned long irqflags;
	unsigned int flags = 0;
	bool resched = false;
	int rx_work = 0;
	int tx_work = 0;
	int n_work = 0;
	int a_work = 0;
	int work_done;
	int credits;

	if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
		n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
					  ionic_notifyq_service, NULL, NULL);

	spin_lock_irqsave(&lif->adminq_lock, irqflags);
	if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
		a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
					  ionic_adminq_service, NULL, NULL);
	spin_unlock_irqrestore(&lif->adminq_lock, irqflags);

	if (lif->hwstamp_rxq)
		rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
					   ionic_rx_service, NULL, NULL);

	if (lif->hwstamp_txq)
		tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
					   ionic_tx_service, NULL, NULL);

	work_done = max(max(n_work, a_work), max(rx_work, tx_work));
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		credits = n_work + a_work + rx_work + tx_work;
		ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
	}

	if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
		resched = true;
	if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
		resched = true;
	if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
		resched = true;
	if (resched)
		mod_timer(&lif->adminqcq->napi_deadline,
			  jiffies + IONIC_NAPI_DEADLINE);

	return work_done;
}

void ionic_get_stats64(struct net_device *netdev,
		       struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}

static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
}

static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	/* Don't delete our own address from the uc list */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
}

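/* Compute and apply the lif's rx mode from the netdev flags and filter
 * state.  If the unicast/multicast or vlan filter tables have overflowed,
 * fall back to PROMISC/ALLMULTI on the NIC.
 */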
void ionic_lif_rx_mode(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	unsigned int nfilters;
	unsigned int nd_flags;
	char buf[128];
	u16 rx_mode;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	mutex_lock(&lif->config_lock);

	/* grab the flags once for local use */
	nd_flags = netdev->flags;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync the filters */
	ionic_rx_filter_sync(lif);

	/* check for overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);

	if (((lif->nucast + lif->nmcast) >= nfilters) ||
	    (lif->max_vlans && lif->nvlans >= lif->max_vlans)) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
	} else {
		if (!(nd_flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
		if (!(nd_flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
	netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);

	if (lif->rx_mode != rx_mode) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_mode_set = {
				.opcode = IONIC_CMD_RX_MODE_SET,
				.lif_index = cpu_to_le16(lif->index),
			},
		};
		int err;

		ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err)
			netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
				    rx_mode, err);
		else
			lif->rx_mode = rx_mode;
	}

	mutex_unlock(&lif->config_lock);
}

static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_deferred_work *work;

	/* Sync the kernel filter list with the driver filter list */
	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);

	/* Shove off the rest of the rxmode work to the work task
	 * which will include syncing the filters to the firmware.
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		netdev_err(lif->netdev, "rxmode change dropped\n");
		return;
	}
	work->type = IONIC_DW_TYPE_RX_MODE;
	netdev_dbg(lif->netdev, "deferred: rx_mode\n");
	ionic_lif_deferred_enqueue(&lif->deferred, work);
}

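/* Map the kernel's netdev_features_t bits onto the device's feature bits
 * for the LIF_SETATTR features command.
 */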
static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}

static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);

	if (lif->phc)
		ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
		dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");

	return 0;
}

static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN |
		   NETIF_F_GSO_GRE |
		   NETIF_F_GSO_GRE_CSUM |
		   NETIF_F_GSO_IPXIP4 |
		   NETIF_F_GSO_IPXIP6 |
		   NETIF_F_GSO_UDP_TUNNEL |
		   NETIF_F_GSO_UDP_TUNNEL_CSUM;

	if (lif->nxqs > 1)
		features |= NETIF_F_RXHASH;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};

	ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
	return 0;
}

static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
{
	u8 get_mac[ETH_ALEN];
	int err;

	err = ionic_set_attr_mac(lif, mac);
	if (err)
		return err;

	err = ionic_get_attr_mac(lif, get_mac);
	if (err)
		return err;

	/* To deal with older firmware that silently ignores the set attr mac:
	 * doesn't actually change the mac and doesn't return an error, so we
	 * do the get attr to verify whether or not the set actually happened
	 */
	if (!ether_addr_equal(get_mac, mac))
		return 1;

	return 0;
}

static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = ionic_program_mac(lif, mac);
	if (err < 0)
		return err;

	if (err > 0)
		netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal - due to old FW running\n",
			   __func__);
1738
1739 err = eth_prepare_mac_addr_change(netdev, addr);
1740 if (err)
1741 return err;
1742
1743 if (!is_zero_ether_addr(netdev->dev_addr)) {
1744 netdev_info(netdev, "deleting mac addr %pM\n",
1745 netdev->dev_addr);
1746 ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr);
1747 }
1748
1749 eth_commit_mac_addr_change(netdev, addr);
1750 netdev_info(netdev, "updating mac addr %pM\n", mac);
1751
1752 return ionic_lif_addr_add(netdev_priv(netdev), mac);
1753 }
1754
ionic_stop_queues_reconfig(struct ionic_lif * lif)1755 void ionic_stop_queues_reconfig(struct ionic_lif *lif)
1756 {
1757 /* Stop and clean the queues before reconfiguration */
1758 netif_device_detach(lif->netdev);
1759 ionic_stop_queues(lif);
1760 ionic_txrx_deinit(lif);
1761 }
1762
1763 static int ionic_start_queues_reconfig(struct ionic_lif *lif)
1764 {
1765 int err;
1766
1767 /* Re-init the queues after reconfiguration */
1768
1769 /* The only way txrx_init can fail here is if communication
1770 * with FW is suddenly broken. There's not much we can do
1771 * at this point - error messages have already been printed,
1772 * so we can continue on and the user can eventually do a
1773 * DOWN and UP to try to reset and clear the issue.
1774 */
1775 err = ionic_txrx_init(lif);
1776 ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
1777 netif_device_attach(lif->netdev);
1778
1779 return err;
1780 }
1781
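/* An MTU change is pushed to the device via LIF_SETATTR first; if the
 * interface is running, the queues are then torn down and rebuilt under
 * queue_lock so that Rx buffer sizing can pick up the new MTU.
 */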
1782 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1783 {
1784 struct ionic_lif *lif = netdev_priv(netdev);
1785 struct ionic_admin_ctx ctx = {
1786 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1787 .cmd.lif_setattr = {
1788 .opcode = IONIC_CMD_LIF_SETATTR,
1789 .index = cpu_to_le16(lif->index),
1790 .attr = IONIC_LIF_ATTR_MTU,
1791 .mtu = cpu_to_le32(new_mtu),
1792 },
1793 };
1794 int err;
1795
1796 err = ionic_adminq_post_wait(lif, &ctx);
1797 if (err)
1798 return err;
1799
1800 /* if we're not running, nothing more to do */
1801 if (!netif_running(netdev)) {
1802 netdev->mtu = new_mtu;
1803 return 0;
1804 }
1805
1806 mutex_lock(&lif->queue_lock);
1807 ionic_stop_queues_reconfig(lif);
1808 netdev->mtu = new_mtu;
1809 err = ionic_start_queues_reconfig(lif);
1810 mutex_unlock(&lif->queue_lock);
1811
1812 return err;
1813 }
1814
1815 static void ionic_tx_timeout_work(struct work_struct *ws)
1816 {
1817 struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1818 int err;
1819
1820 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
1821 return;
1822
1823 /* if we were stopped before this scheduled job was launched,
1824 * don't bother the queues as they are already stopped.
1825 */
1826 if (!netif_running(lif->netdev))
1827 return;
1828
1829 mutex_lock(&lif->queue_lock);
1830 ionic_stop_queues_reconfig(lif);
1831 err = ionic_start_queues_reconfig(lif);
1832 mutex_unlock(&lif->queue_lock);
1833
1834 if (err)
1835 dev_err(lif->ionic->dev, "%s: Restarting queues failed\n", __func__);
1836 }
1837
1838 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1839 {
1840 struct ionic_lif *lif = netdev_priv(netdev);
1841
1842 netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
1843 schedule_work(&lif->tx_timeout_work);
1844 }
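/* After a VLAN filter is added or deleted we re-run ionic_lif_rx_mode()
 * so the device rx mode is resynced with the new filter state (e.g.
 * falling back to VLAN promiscuity when the filter table is full).
 */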
1845
1846 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1847 u16 vid)
1848 {
1849 struct ionic_lif *lif = netdev_priv(netdev);
1850 int err;
1851
1852 err = ionic_lif_vlan_add(lif, vid);
1853 if (err)
1854 return err;
1855
1856 ionic_lif_rx_mode(lif);
1857
1858 return 0;
1859 }
1860
1861 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1862 u16 vid)
1863 {
1864 struct ionic_lif *lif = netdev_priv(netdev);
1865 int err;
1866
1867 err = ionic_lif_vlan_del(lif, vid);
1868 if (err)
1869 return err;
1870
1871 ionic_lif_rx_mode(lif);
1872
1873 return 0;
1874 }
1875
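/* Push the current RSS state to the device: the hash-type bitmap, the
 * hash key, and the DMA address of the indirection table.  A NULL key
 * or indir leaves that piece of cached state unchanged; everything is
 * then posted in a single LIF_SETATTR command.
 */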
1876 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1877 const u8 *key, const u32 *indir)
1878 {
1879 struct ionic_admin_ctx ctx = {
1880 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1881 .cmd.lif_setattr = {
1882 .opcode = IONIC_CMD_LIF_SETATTR,
1883 .attr = IONIC_LIF_ATTR_RSS,
1884 .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1885 },
1886 };
1887 unsigned int i, tbl_sz;
1888
1889 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
1890 lif->rss_types = types;
1891 ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
1892 }
1893
1894 if (key)
1895 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1896
1897 if (indir) {
1898 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1899 for (i = 0; i < tbl_sz; i++)
1900 lif->rss_ind_tbl[i] = indir[i];
1901 }
1902
1903 memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1904 IONIC_RSS_HASH_KEY_SIZE);
1905
1906 return ionic_adminq_post_wait(lif, &ctx);
1907 }
1908
1909 static int ionic_lif_rss_init(struct ionic_lif *lif)
1910 {
1911 unsigned int tbl_sz;
1912 unsigned int i;
1913
1914 lif->rss_types = IONIC_RSS_TYPE_IPV4 |
1915 IONIC_RSS_TYPE_IPV4_TCP |
1916 IONIC_RSS_TYPE_IPV4_UDP |
1917 IONIC_RSS_TYPE_IPV6 |
1918 IONIC_RSS_TYPE_IPV6_TCP |
1919 IONIC_RSS_TYPE_IPV6_UDP;
1920
1921 /* Fill indirection table with 'default' values */
1922 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1923 for (i = 0; i < tbl_sz; i++)
1924 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1925
1926 return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1927 }
1928
1929 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1930 {
1931 int tbl_sz;
1932
1933 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1934 memset(lif->rss_ind_tbl, 0, tbl_sz);
1935 memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1936
1937 ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1938 }
1939
1940 static void ionic_lif_quiesce(struct ionic_lif *lif)
1941 {
1942 struct ionic_admin_ctx ctx = {
1943 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1944 .cmd.lif_setattr = {
1945 .opcode = IONIC_CMD_LIF_SETATTR,
1946 .index = cpu_to_le16(lif->index),
1947 .attr = IONIC_LIF_ATTR_STATE,
1948 .state = IONIC_LIF_QUIESCE,
1949 },
1950 };
1951 int err;
1952
1953 err = ionic_adminq_post_wait(lif, &ctx);
1954 if (err)
1955 netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err);
1956 }
1957
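/* Disable all Tx then Rx queues, then quiesce the LIF.  The running
 * error value is fed back into each ionic_qcq_disable() call so that
 * an earlier failure (e.g. a dead FW) can be taken into account while
 * still walking every queue.
 */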
1958 static void ionic_txrx_disable(struct ionic_lif *lif)
1959 {
1960 unsigned int i;
1961 int err = 0;
1962
1963 if (lif->txqcqs) {
1964 for (i = 0; i < lif->nxqs; i++)
1965 err = ionic_qcq_disable(lif, lif->txqcqs[i], err);
1966 }
1967
1968 if (lif->hwstamp_txq)
1969 err = ionic_qcq_disable(lif, lif->hwstamp_txq, err);
1970
1971 if (lif->rxqcqs) {
1972 for (i = 0; i < lif->nxqs; i++)
1973 err = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
1974 }
1975
1976 if (lif->hwstamp_rxq)
1977 err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err);
1978
1979 ionic_lif_quiesce(lif);
1980 }
1981
1982 static void ionic_txrx_deinit(struct ionic_lif *lif)
1983 {
1984 unsigned int i;
1985
1986 if (lif->txqcqs) {
1987 for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
1988 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
1989 ionic_tx_flush(&lif->txqcqs[i]->cq);
1990 ionic_tx_empty(&lif->txqcqs[i]->q);
1991 }
1992 }
1993
1994 if (lif->rxqcqs) {
1995 for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
1996 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
1997 ionic_rx_empty(&lif->rxqcqs[i]->q);
1998 }
1999 }
2000 lif->rx_mode = 0;
2001
2002 if (lif->hwstamp_txq) {
2003 ionic_lif_qcq_deinit(lif, lif->hwstamp_txq);
2004 ionic_tx_flush(&lif->hwstamp_txq->cq);
2005 ionic_tx_empty(&lif->hwstamp_txq->q);
2006 }
2007
2008 if (lif->hwstamp_rxq) {
2009 ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq);
2010 ionic_rx_empty(&lif->hwstamp_rxq->q);
2011 }
2012 }
2013
2014 void ionic_txrx_free(struct ionic_lif *lif)
2015 {
2016 unsigned int i;
2017
2018 if (lif->txqcqs) {
2019 for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
2020 ionic_qcq_free(lif, lif->txqcqs[i]);
2021 devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
2022 lif->txqcqs[i] = NULL;
2023 }
2024 }
2025
2026 if (lif->rxqcqs) {
2027 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
2028 ionic_qcq_free(lif, lif->rxqcqs[i]);
2029 devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
2030 lif->rxqcqs[i] = NULL;
2031 }
2032 }
2033
2034 if (lif->hwstamp_txq) {
2035 ionic_qcq_free(lif, lif->hwstamp_txq);
2036 devm_kfree(lif->ionic->dev, lif->hwstamp_txq);
2037 lif->hwstamp_txq = NULL;
2038 }
2039
2040 if (lif->hwstamp_rxq) {
2041 ionic_qcq_free(lif, lif->hwstamp_rxq);
2042 devm_kfree(lif->ionic->dev, lif->hwstamp_rxq);
2043 lif->hwstamp_rxq = NULL;
2044 }
2045 }
2046
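/* Allocate the Tx and Rx qcqs.  The Tx SG descriptor size depends on
 * the queue-type version negotiated with the device (v1 and later use
 * the larger ionic_txq_sg_desc_v1), and the Rx completion descriptor
 * doubles in size when IONIC_Q_F_2X_CQ_DESC is in use.
 */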
2047 static int ionic_txrx_alloc(struct ionic_lif *lif)
2048 {
2049 unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
2050 unsigned int flags, i;
2051 int err = 0;
2052
2053 num_desc = lif->ntxq_descs;
2054 desc_sz = sizeof(struct ionic_txq_desc);
2055 comp_sz = sizeof(struct ionic_txq_comp);
2056
2057 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
2058 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
2059 sizeof(struct ionic_txq_sg_desc_v1))
2060 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
2061 else
2062 sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
2063
2064 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
2065
2066 if (test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state))
2067 flags |= IONIC_QCQ_F_CMB_RINGS;
2068
2069 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
2070 flags |= IONIC_QCQ_F_INTR;
2071
2072 for (i = 0; i < lif->nxqs; i++) {
2073 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2074 num_desc, desc_sz, comp_sz, sg_desc_sz,
2075 lif->kern_pid, &lif->txqcqs[i]);
2076 if (err)
2077 goto err_out;
2078
2079 if (flags & IONIC_QCQ_F_INTR) {
2080 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2081 lif->txqcqs[i]->intr.index,
2082 lif->tx_coalesce_hw);
2083 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
2084 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
2085 }
2086
2087 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
2088 }
2089
2090 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
2091
2092 if (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
2093 flags |= IONIC_QCQ_F_CMB_RINGS;
2094
2095 num_desc = lif->nrxq_descs;
2096 desc_sz = sizeof(struct ionic_rxq_desc);
2097 comp_sz = sizeof(struct ionic_rxq_comp);
2098 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
2099
2100 if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC)
2101 comp_sz *= 2;
2102
2103 for (i = 0; i < lif->nxqs; i++) {
2104 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2105 num_desc, desc_sz, comp_sz, sg_desc_sz,
2106 lif->kern_pid, &lif->rxqcqs[i]);
2107 if (err)
2108 goto err_out;
2109
2110 lif->rxqcqs[i]->q.features = lif->rxq_features;
2111
2112 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2113 lif->rxqcqs[i]->intr.index,
2114 lif->rx_coalesce_hw);
2115 if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
2116 lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;
2117
2118 if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
2119 ionic_link_qcq_interrupts(lif->rxqcqs[i],
2120 lif->txqcqs[i]);
2121
2122 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
2123 }
2124
2125 return 0;
2126
2127 err_out:
2128 ionic_txrx_free(lif);
2129
2130 return err;
2131 }
2132
2133 static int ionic_txrx_init(struct ionic_lif *lif)
2134 {
2135 unsigned int i;
2136 int err;
2137
2138 for (i = 0; i < lif->nxqs; i++) {
2139 err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
2140 if (err)
2141 goto err_out;
2142
2143 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
2144 if (err) {
2145 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
2146 goto err_out;
2147 }
2148 }
2149
2150 if (lif->netdev->features & NETIF_F_RXHASH)
2151 ionic_lif_rss_init(lif);
2152
2153 ionic_lif_rx_mode(lif);
2154
2155 return 0;
2156
2157 err_out:
2158 while (i--) {
2159 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
2160 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
2161 }
2162
2163 return err;
2164 }
2165
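/* Enable the queue pairs: each Rx ring is filled before it is enabled,
 * and its partner Tx queue is enabled immediately after so that a Tx
 * failure can unwind the just-enabled Rx queue.  The hwstamp queues,
 * if present, are brought up last.
 */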
2166 static int ionic_txrx_enable(struct ionic_lif *lif)
2167 {
2168 int derr = 0;
2169 int i, err;
2170
2171 for (i = 0; i < lif->nxqs; i++) {
2172 if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
2173 dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
2174 err = -ENXIO;
2175 goto err_out;
2176 }
2177
2178 ionic_rx_fill(&lif->rxqcqs[i]->q);
2179 err = ionic_qcq_enable(lif->rxqcqs[i]);
2180 if (err)
2181 goto err_out;
2182
2183 err = ionic_qcq_enable(lif->txqcqs[i]);
2184 if (err) {
2185 derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
2186 goto err_out;
2187 }
2188 }
2189
2190 if (lif->hwstamp_rxq) {
2191 ionic_rx_fill(&lif->hwstamp_rxq->q);
2192 err = ionic_qcq_enable(lif->hwstamp_rxq);
2193 if (err)
2194 goto err_out_hwstamp_rx;
2195 }
2196
2197 if (lif->hwstamp_txq) {
2198 err = ionic_qcq_enable(lif->hwstamp_txq);
2199 if (err)
2200 goto err_out_hwstamp_tx;
2201 }
2202
2203 return 0;
2204
2205 err_out_hwstamp_tx:
2206 if (lif->hwstamp_rxq)
2207 derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr);
2208 err_out_hwstamp_rx:
2209 i = lif->nxqs;
2210 err_out:
2211 while (i--) {
2212 derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr);
2213 derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
2214 }
2215
2216 return err;
2217 }
2218
2219 static int ionic_start_queues(struct ionic_lif *lif)
2220 {
2221 int err;
2222
2223 if (test_bit(IONIC_LIF_F_BROKEN, lif->state))
2224 return -EIO;
2225
2226 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2227 return -EBUSY;
2228
2229 if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
2230 return 0;
2231
2232 err = ionic_txrx_enable(lif);
2233 if (err) {
2234 clear_bit(IONIC_LIF_F_UP, lif->state);
2235 return err;
2236 }
2237 netif_tx_wake_all_queues(lif->netdev);
2238
2239 return 0;
2240 }
2241
2242 static int ionic_open(struct net_device *netdev)
2243 {
2244 struct ionic_lif *lif = netdev_priv(netdev);
2245 int err;
2246
2247 /* If recovering from a broken state, clear the bit and we'll try again */
2248 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
2249 netdev_info(netdev, "clearing broken state\n");
2250
2251 mutex_lock(&lif->queue_lock);
2252
2253 err = ionic_txrx_alloc(lif);
2254 if (err)
2255 goto err_unlock;
2256
2257 err = ionic_txrx_init(lif);
2258 if (err)
2259 goto err_txrx_free;
2260
2261 err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
2262 if (err)
2263 goto err_txrx_deinit;
2264
2265 err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
2266 if (err)
2267 goto err_txrx_deinit;
2268
2269 /* don't start the queues until we have link */
2270 if (netif_carrier_ok(netdev)) {
2271 err = ionic_start_queues(lif);
2272 if (err)
2273 goto err_txrx_deinit;
2274 }
2275
2276 /* If hardware timestamping is enabled, but the queues were freed by
2277 * ionic_stop, those need to be reallocated and initialized, too.
2278 */
2279 ionic_lif_hwstamp_recreate_queues(lif);
2280
2281 mutex_unlock(&lif->queue_lock);
2282
2283 return 0;
2284
2285 err_txrx_deinit:
2286 ionic_txrx_deinit(lif);
2287 err_txrx_free:
2288 ionic_txrx_free(lif);
2289 err_unlock:
2290 mutex_unlock(&lif->queue_lock);
2291 return err;
2292 }
2293
2294 static void ionic_stop_queues(struct ionic_lif *lif)
2295 {
2296 if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
2297 return;
2298
2299 netif_tx_disable(lif->netdev);
2300 ionic_txrx_disable(lif);
2301 }
2302
2303 static int ionic_stop(struct net_device *netdev)
2304 {
2305 struct ionic_lif *lif = netdev_priv(netdev);
2306
2307 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2308 return 0;
2309
2310 mutex_lock(&lif->queue_lock);
2311 ionic_stop_queues(lif);
2312 ionic_txrx_deinit(lif);
2313 ionic_txrx_free(lif);
2314 mutex_unlock(&lif->queue_lock);
2315
2316 return 0;
2317 }
2318
2319 static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2320 {
2321 struct ionic_lif *lif = netdev_priv(netdev);
2322
2323 switch (cmd) {
2324 case SIOCSHWTSTAMP:
2325 return ionic_lif_hwstamp_set(lif, ifr);
2326 case SIOCGHWTSTAMP:
2327 return ionic_lif_hwstamp_get(lif, ifr);
2328 default:
2329 return -EOPNOTSUPP;
2330 }
2331 }
2332
2333 static int ionic_get_fw_vf_config(struct ionic *ionic, int vf, struct ionic_vf *vfdata)
2334 {
2335 struct ionic_vf_getattr_comp comp = { 0 };
2336 int err;
2337 u8 attr;
2338
2339 attr = IONIC_VF_ATTR_VLAN;
2340 err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
2341 if (err && comp.status != IONIC_RC_ENOSUPP)
2342 goto err_out;
2343 if (!err)
2344 vfdata->vlanid = comp.vlanid;
2345
2346 attr = IONIC_VF_ATTR_SPOOFCHK;
2347 err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
2348 if (err && comp.status != IONIC_RC_ENOSUPP)
2349 goto err_out;
2350 if (!err)
2351 vfdata->spoofchk = comp.spoofchk;
2352
2353 attr = IONIC_VF_ATTR_LINKSTATE;
2354 err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
2355 if (err && comp.status != IONIC_RC_ENOSUPP)
2356 goto err_out;
2357 if (!err) {
2358 switch (comp.linkstate) {
2359 case IONIC_VF_LINK_STATUS_UP:
2360 vfdata->linkstate = IFLA_VF_LINK_STATE_ENABLE;
2361 break;
2362 case IONIC_VF_LINK_STATUS_DOWN:
2363 vfdata->linkstate = IFLA_VF_LINK_STATE_DISABLE;
2364 break;
2365 case IONIC_VF_LINK_STATUS_AUTO:
2366 vfdata->linkstate = IFLA_VF_LINK_STATE_AUTO;
2367 break;
2368 default:
2369 dev_warn(ionic->dev, "Unexpected link state %u\n", comp.linkstate);
2370 break;
2371 }
2372 }
2373
2374 attr = IONIC_VF_ATTR_RATE;
2375 err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
2376 if (err && comp.status != IONIC_RC_ENOSUPP)
2377 goto err_out;
2378 if (!err)
2379 vfdata->maxrate = comp.maxrate;
2380
2381 attr = IONIC_VF_ATTR_TRUST;
2382 err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
2383 if (err && comp.status != IONIC_RC_ENOSUPP)
2384 goto err_out;
2385 if (!err)
2386 vfdata->trusted = comp.trust;
2387
2388 attr = IONIC_VF_ATTR_MAC;
2389 err = ionic_dev_cmd_vf_getattr(ionic, vf, attr, &comp);
2390 if (err && comp.status != IONIC_RC_ENOSUPP)
2391 goto err_out;
2392 if (!err)
2393 ether_addr_copy(vfdata->macaddr, comp.macaddr);
2394
2395 err_out:
2396 if (err)
2397 dev_err(ionic->dev, "Failed to get %s for VF %d\n",
2398 ionic_vf_attr_to_str(attr), vf);
2399
2400 return err;
2401 }
2402
2403 static int ionic_get_vf_config(struct net_device *netdev,
2404 int vf, struct ifla_vf_info *ivf)
2405 {
2406 struct ionic_lif *lif = netdev_priv(netdev);
2407 struct ionic *ionic = lif->ionic;
2408 struct ionic_vf vfdata = { 0 };
2409 int ret = 0;
2410
2411 if (!netif_device_present(netdev))
2412 return -EBUSY;
2413
2414 down_read(&ionic->vf_op_lock);
2415
2416 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2417 ret = -EINVAL;
2418 } else {
2419 ivf->vf = vf;
2420 ivf->qos = 0;
2421
2422 ret = ionic_get_fw_vf_config(ionic, vf, &vfdata);
2423 if (!ret) {
2424 ivf->vlan = le16_to_cpu(vfdata.vlanid);
2425 ivf->spoofchk = vfdata.spoofchk;
2426 ivf->linkstate = vfdata.linkstate;
2427 ivf->max_tx_rate = le32_to_cpu(vfdata.maxrate);
2428 ivf->trusted = vfdata.trusted;
2429 ether_addr_copy(ivf->mac, vfdata.macaddr);
2430 }
2431 }
2432
2433 up_read(&ionic->vf_op_lock);
2434 return ret;
2435 }
2436
2437 static int ionic_get_vf_stats(struct net_device *netdev, int vf,
2438 struct ifla_vf_stats *vf_stats)
2439 {
2440 struct ionic_lif *lif = netdev_priv(netdev);
2441 struct ionic *ionic = lif->ionic;
2442 struct ionic_lif_stats *vs;
2443 int ret = 0;
2444
2445 if (!netif_device_present(netdev))
2446 return -EBUSY;
2447
2448 down_read(&ionic->vf_op_lock);
2449
2450 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2451 ret = -EINVAL;
2452 } else {
2453 memset(vf_stats, 0, sizeof(*vf_stats));
2454 vs = &ionic->vfs[vf].stats;
2455
2456 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
2457 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
2458 vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes);
2459 vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes);
2460 vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets);
2461 vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets);
2462 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
2463 le64_to_cpu(vs->rx_mcast_drop_packets) +
2464 le64_to_cpu(vs->rx_bcast_drop_packets);
2465 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
2466 le64_to_cpu(vs->tx_mcast_drop_packets) +
2467 le64_to_cpu(vs->tx_bcast_drop_packets);
2468 }
2469
2470 up_read(&ionic->vf_op_lock);
2471 return ret;
2472 }
2473
2474 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
2475 {
2476 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC };
2477 struct ionic_lif *lif = netdev_priv(netdev);
2478 struct ionic *ionic = lif->ionic;
2479 int ret;
2480
2481 if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
2482 return -EINVAL;
2483
2484 if (!netif_device_present(netdev))
2485 return -EBUSY;
2486
2487 down_write(&ionic->vf_op_lock);
2488
2489 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2490 ret = -EINVAL;
2491 } else {
2492 ether_addr_copy(vfc.macaddr, mac);
2493 dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
2494 __func__, vf, vfc.macaddr);
2495
2496 ret = ionic_set_vf_config(ionic, vf, &vfc);
2497 if (!ret)
2498 ether_addr_copy(ionic->vfs[vf].macaddr, mac);
2499 }
2500
2501 up_write(&ionic->vf_op_lock);
2502 return ret;
2503 }
2504
2505 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
2506 u8 qos, __be16 proto)
2507 {
2508 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN };
2509 struct ionic_lif *lif = netdev_priv(netdev);
2510 struct ionic *ionic = lif->ionic;
2511 int ret;
2512
2513 /* until someday when we support qos */
2514 if (qos)
2515 return -EINVAL;
2516
2517 if (vlan > 4095)
2518 return -EINVAL;
2519
2520 if (proto != htons(ETH_P_8021Q))
2521 return -EPROTONOSUPPORT;
2522
2523 if (!netif_device_present(netdev))
2524 return -EBUSY;
2525
2526 down_write(&ionic->vf_op_lock);
2527
2528 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2529 ret = -EINVAL;
2530 } else {
2531 vfc.vlanid = cpu_to_le16(vlan);
2532 dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
2533 __func__, vf, le16_to_cpu(vfc.vlanid));
2534
2535 ret = ionic_set_vf_config(ionic, vf, &vfc);
2536 if (!ret)
2537 ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
2538 }
2539
2540 up_write(&ionic->vf_op_lock);
2541 return ret;
2542 }
2543
2544 static int ionic_set_vf_rate(struct net_device *netdev, int vf,
2545 int tx_min, int tx_max)
2546 {
2547 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE };
2548 struct ionic_lif *lif = netdev_priv(netdev);
2549 struct ionic *ionic = lif->ionic;
2550 int ret;
2551
2552 /* setting a minimum tx rate is not supported */
2553 if (tx_min)
2554 return -EINVAL;
2555
2556 if (!netif_device_present(netdev))
2557 return -EBUSY;
2558
2559 down_write(&ionic->vf_op_lock);
2560
2561 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2562 ret = -EINVAL;
2563 } else {
2564 vfc.maxrate = cpu_to_le32(tx_max);
2565 dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
2566 __func__, vf, le32_to_cpu(vfc.maxrate));
2567
2568 ret = ionic_set_vf_config(ionic, vf, &vfc);
2569 if (!ret)
2570 ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
2571 }
2572
2573 up_write(&ionic->vf_op_lock);
2574 return ret;
2575 }
2576
2577 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
2578 {
2579 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK };
2580 struct ionic_lif *lif = netdev_priv(netdev);
2581 struct ionic *ionic = lif->ionic;
2582 int ret;
2583
2584 if (!netif_device_present(netdev))
2585 return -EBUSY;
2586
2587 down_write(&ionic->vf_op_lock);
2588
2589 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2590 ret = -EINVAL;
2591 } else {
2592 vfc.spoofchk = set;
2593 dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
2594 __func__, vf, vfc.spoofchk);
2595
2596 ret = ionic_set_vf_config(ionic, vf, &vfc);
2597 if (!ret)
2598 ionic->vfs[vf].spoofchk = set;
2599 }
2600
2601 up_write(&ionic->vf_op_lock);
2602 return ret;
2603 }
2604
2605 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
2606 {
2607 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST };
2608 struct ionic_lif *lif = netdev_priv(netdev);
2609 struct ionic *ionic = lif->ionic;
2610 int ret;
2611
2612 if (!netif_device_present(netdev))
2613 return -EBUSY;
2614
2615 down_write(&ionic->vf_op_lock);
2616
2617 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2618 ret = -EINVAL;
2619 } else {
2620 vfc.trust = set;
2621 dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
2622 __func__, vf, vfc.trust);
2623
2624 ret = ionic_set_vf_config(ionic, vf, &vfc);
2625 if (!ret)
2626 ionic->vfs[vf].trusted = set;
2627 }
2628
2629 up_write(&ionic->vf_op_lock);
2630 return ret;
2631 }
2632
2633 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
2634 {
2635 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE };
2636 struct ionic_lif *lif = netdev_priv(netdev);
2637 struct ionic *ionic = lif->ionic;
2638 u8 vfls;
2639 int ret;
2640
2641 switch (set) {
2642 case IFLA_VF_LINK_STATE_ENABLE:
2643 vfls = IONIC_VF_LINK_STATUS_UP;
2644 break;
2645 case IFLA_VF_LINK_STATE_DISABLE:
2646 vfls = IONIC_VF_LINK_STATUS_DOWN;
2647 break;
2648 case IFLA_VF_LINK_STATE_AUTO:
2649 vfls = IONIC_VF_LINK_STATUS_AUTO;
2650 break;
2651 default:
2652 return -EINVAL;
2653 }
2654
2655 if (!netif_device_present(netdev))
2656 return -EBUSY;
2657
2658 down_write(&ionic->vf_op_lock);
2659
2660 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2661 ret = -EINVAL;
2662 } else {
2663 vfc.linkstate = vfls;
2664 dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
2665 __func__, vf, vfc.linkstate);
2666
2667 ret = ionic_set_vf_config(ionic, vf, &vfc);
2668 if (!ret)
2669 ionic->vfs[vf].linkstate = set;
2670 }
2671
2672 up_write(&ionic->vf_op_lock);
2673 return ret;
2674 }
2675
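/* Replay any previously set VF attributes after an FW reset.  The one
 * vfc struct is reused for every attribute, so each field is zeroed
 * again after its ionic_set_vf_config() call.
 */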
2676 static void ionic_vf_attr_replay(struct ionic_lif *lif)
2677 {
2678 struct ionic_vf_setattr_cmd vfc = { };
2679 struct ionic *ionic = lif->ionic;
2680 struct ionic_vf *v;
2681 int i;
2682
2683 if (!ionic->vfs)
2684 return;
2685
2686 down_read(&ionic->vf_op_lock);
2687
2688 for (i = 0; i < ionic->num_vfs; i++) {
2689 v = &ionic->vfs[i];
2690
2691 if (v->stats_pa) {
2692 vfc.attr = IONIC_VF_ATTR_STATSADDR;
2693 vfc.stats_pa = cpu_to_le64(v->stats_pa);
2694 ionic_set_vf_config(ionic, i, &vfc);
2695 vfc.stats_pa = 0;
2696 }
2697
2698 if (!is_zero_ether_addr(v->macaddr)) {
2699 vfc.attr = IONIC_VF_ATTR_MAC;
2700 ether_addr_copy(vfc.macaddr, v->macaddr);
2701 ionic_set_vf_config(ionic, i, &vfc);
2702 eth_zero_addr(vfc.macaddr);
2703 }
2704
2705 if (v->vlanid) {
2706 vfc.attr = IONIC_VF_ATTR_VLAN;
2707 vfc.vlanid = v->vlanid;
2708 ionic_set_vf_config(ionic, i, &vfc);
2709 vfc.vlanid = 0;
2710 }
2711
2712 if (v->maxrate) {
2713 vfc.attr = IONIC_VF_ATTR_RATE;
2714 vfc.maxrate = v->maxrate;
2715 ionic_set_vf_config(ionic, i, &vfc);
2716 vfc.maxrate = 0;
2717 }
2718
2719 if (v->spoofchk) {
2720 vfc.attr = IONIC_VF_ATTR_SPOOFCHK;
2721 vfc.spoofchk = v->spoofchk;
2722 ionic_set_vf_config(ionic, i, &vfc);
2723 vfc.spoofchk = 0;
2724 }
2725
2726 if (v->trusted) {
2727 vfc.attr = IONIC_VF_ATTR_TRUST;
2728 vfc.trust = v->trusted;
2729 ionic_set_vf_config(ionic, i, &vfc);
2730 vfc.trust = 0;
2731 }
2732
2733 if (v->linkstate) {
2734 vfc.attr = IONIC_VF_ATTR_LINKSTATE;
2735 vfc.linkstate = v->linkstate;
2736 ionic_set_vf_config(ionic, i, &vfc);
2737 vfc.linkstate = 0;
2738 }
2739 }
2740
2741 up_read(&ionic->vf_op_lock);
2742
2743 ionic_vf_start(ionic);
2744 }
2745
2746 static const struct net_device_ops ionic_netdev_ops = {
2747 .ndo_open = ionic_open,
2748 .ndo_stop = ionic_stop,
2749 .ndo_eth_ioctl = ionic_eth_ioctl,
2750 .ndo_start_xmit = ionic_start_xmit,
2751 .ndo_get_stats64 = ionic_get_stats64,
2752 .ndo_set_rx_mode = ionic_ndo_set_rx_mode,
2753 .ndo_set_features = ionic_set_features,
2754 .ndo_set_mac_address = ionic_set_mac_address,
2755 .ndo_validate_addr = eth_validate_addr,
2756 .ndo_tx_timeout = ionic_tx_timeout,
2757 .ndo_change_mtu = ionic_change_mtu,
2758 .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid,
2759 .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid,
2760 .ndo_set_vf_vlan = ionic_set_vf_vlan,
2761 .ndo_set_vf_trust = ionic_set_vf_trust,
2762 .ndo_set_vf_mac = ionic_set_vf_mac,
2763 .ndo_set_vf_rate = ionic_set_vf_rate,
2764 .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk,
2765 .ndo_get_vf_config = ionic_get_vf_config,
2766 .ndo_set_vf_link_state = ionic_set_vf_link_state,
2767 .ndo_get_vf_stats = ionic_get_vf_stats,
2768 };
2769
2770 static int ionic_cmb_reconfig(struct ionic_lif *lif,
2771 struct ionic_queue_params *qparam)
2772 {
2773 struct ionic_queue_params start_qparams;
2774 int err = 0;
2775
2776 /* When changing CMB queue parameters, we're using limited
2777 * on-device memory and don't have extra memory to use for
2778 * duplicate allocations, so we free it all first then
2779 * re-allocate with the new parameters.
2780 */
2781
2782 /* Checkpoint for possible unwind */
2783 ionic_init_queue_params(lif, &start_qparams);
2784
2785 /* Stop and free the queues */
2786 ionic_stop_queues_reconfig(lif);
2787 ionic_txrx_free(lif);
2788
2789 /* Set up new qparams */
2790 ionic_set_queue_params(lif, qparam);
2791
2792 if (netif_running(lif->netdev)) {
2793 /* Alloc and start the new configuration */
2794 err = ionic_txrx_alloc(lif);
2795 if (err) {
2796 dev_warn(lif->ionic->dev,
2797 "CMB reconfig failed, restoring values: %d\n", err);
2798
2799 /* Back out the changes */
2800 ionic_set_queue_params(lif, &start_qparams);
2801 err = ionic_txrx_alloc(lif);
2802 if (err) {
2803 dev_err(lif->ionic->dev,
2804 "CMB restore failed: %d\n", err);
2805 goto err_out;
2806 }
2807 }
2808
2809 err = ionic_start_queues_reconfig(lif);
2810 if (err) {
2811 dev_err(lif->ionic->dev,
2812 "CMB reconfig failed: %d\n", err);
2813 goto err_out;
2814 }
2815 }
2816
2817 err_out:
2818 /* This was detached in ionic_stop_queues_reconfig() */
2819 netif_device_attach(lif->netdev);
2820
2821 return err;
2822 }
2823
2824 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
2825 {
2826 /* only swapping the queues, not the napi, flags, or other stuff */
2827 swap(a->q.features, b->q.features);
2828 swap(a->q.num_descs, b->q.num_descs);
2829 swap(a->q.desc_size, b->q.desc_size);
2830 swap(a->q.base, b->q.base);
2831 swap(a->q.base_pa, b->q.base_pa);
2832 swap(a->q.info, b->q.info);
2833 swap(a->q_base, b->q_base);
2834 swap(a->q_base_pa, b->q_base_pa);
2835 swap(a->q_size, b->q_size);
2836
2837 swap(a->q.sg_desc_size, b->q.sg_desc_size);
2838 swap(a->q.sg_base, b->q.sg_base);
2839 swap(a->q.sg_base_pa, b->q.sg_base_pa);
2840 swap(a->sg_base, b->sg_base);
2841 swap(a->sg_base_pa, b->sg_base_pa);
2842 swap(a->sg_size, b->sg_size);
2843
2844 swap(a->cq.num_descs, b->cq.num_descs);
2845 swap(a->cq.desc_size, b->cq.desc_size);
2846 swap(a->cq.base, b->cq.base);
2847 swap(a->cq.base_pa, b->cq.base_pa);
2848 swap(a->cq.info, b->cq.info);
2849 swap(a->cq_base, b->cq_base);
2850 swap(a->cq_base_pa, b->cq_base_pa);
2851 swap(a->cq_size, b->cq_size);
2852
2853 ionic_debugfs_del_qcq(a);
2854 ionic_debugfs_add_qcq(a->q.lif, a);
2855 }
2856
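/* Reconfigure ring counts and sizes by allocating the new qcqs
 * alongside the old ones and then swapping ring state, so that an
 * allocation failure leaves the current configuration untouched.
 * CMB-backed queues can't be double-allocated and instead take the
 * free-then-realloc path in ionic_cmb_reconfig() above.
 */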
2857 int ionic_reconfigure_queues(struct ionic_lif *lif,
2858 struct ionic_queue_params *qparam)
2859 {
2860 unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
2861 struct ionic_qcq **tx_qcqs = NULL;
2862 struct ionic_qcq **rx_qcqs = NULL;
2863 unsigned int flags, i;
2864 int err = 0;
2865
2866 /* Are we changing q params while CMB is on? */
2867 if ((test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) && qparam->cmb_tx) ||
2868 (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state) && qparam->cmb_rx))
2869 return ionic_cmb_reconfig(lif, qparam);
2870
2871 /* allocate temporary qcq arrays to hold new queue structs */
2872 if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
2873 tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
2874 sizeof(struct ionic_qcq *), GFP_KERNEL);
2875 if (!tx_qcqs) {
2876 err = -ENOMEM;
2877 goto err_out;
2878 }
2879 }
2880 if (qparam->nxqs != lif->nxqs ||
2881 qparam->nrxq_descs != lif->nrxq_descs ||
2882 qparam->rxq_features != lif->rxq_features) {
2883 rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
2884 sizeof(struct ionic_qcq *), GFP_KERNEL);
2885 if (!rx_qcqs) {
2886 err = -ENOMEM;
2887 goto err_out;
2888 }
2889 }
2890
2891 /* allocate new desc_info and rings, but leave the interrupt setup
2892 * until later so as to not mess with the still-running queues
2893 */
2894 if (tx_qcqs) {
2895 num_desc = qparam->ntxq_descs;
2896 desc_sz = sizeof(struct ionic_txq_desc);
2897 comp_sz = sizeof(struct ionic_txq_comp);
2898
2899 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
2900 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
2901 sizeof(struct ionic_txq_sg_desc_v1))
2902 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
2903 else
2904 sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
2905
2906 for (i = 0; i < qparam->nxqs; i++) {
2907 /* If missing, short placeholder qcq needed for swap */
2908 if (!lif->txqcqs[i]) {
2909 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
2910 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2911 4, desc_sz, comp_sz, sg_desc_sz,
2912 lif->kern_pid, &lif->txqcqs[i]);
2913 if (err)
2914 goto err_out;
2915 }
2916
2917 flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
2918 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2919 num_desc, desc_sz, comp_sz, sg_desc_sz,
2920 lif->kern_pid, &tx_qcqs[i]);
2921 if (err)
2922 goto err_out;
2923 }
2924 }
2925
2926 if (rx_qcqs) {
2927 num_desc = qparam->nrxq_descs;
2928 desc_sz = sizeof(struct ionic_rxq_desc);
2929 comp_sz = sizeof(struct ionic_rxq_comp);
2930 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
2931
2932 if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC)
2933 comp_sz *= 2;
2934
2935 for (i = 0; i < qparam->nxqs; i++) {
2936 /* If missing, short placeholder qcq needed for swap */
2937 if (!lif->rxqcqs[i]) {
2938 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
2939 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2940 4, desc_sz, comp_sz, sg_desc_sz,
2941 lif->kern_pid, &lif->rxqcqs[i]);
2942 if (err)
2943 goto err_out;
2944 }
2945
2946 flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
2947 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2948 num_desc, desc_sz, comp_sz, sg_desc_sz,
2949 lif->kern_pid, &rx_qcqs[i]);
2950 if (err)
2951 goto err_out;
2952
2953 rx_qcqs[i]->q.features = qparam->rxq_features;
2954 }
2955 }
2956
2957 /* stop and clean the queues */
2958 ionic_stop_queues_reconfig(lif);
2959
2960 if (qparam->nxqs != lif->nxqs) {
2961 err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
2962 if (err)
2963 goto err_out_reinit_unlock;
2964 err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
2965 if (err) {
2966 netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
2967 goto err_out_reinit_unlock;
2968 }
2969 }
2970
2971 /* swap new desc_info and rings, keeping existing interrupt config */
2972 if (tx_qcqs) {
2973 lif->ntxq_descs = qparam->ntxq_descs;
2974 for (i = 0; i < qparam->nxqs; i++)
2975 ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
2976 }
2977
2978 if (rx_qcqs) {
2979 lif->nrxq_descs = qparam->nrxq_descs;
2980 for (i = 0; i < qparam->nxqs; i++)
2981 ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
2982 }
2983
2984 /* if we need to change the interrupt layout, this is the time */
2985 if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
2986 qparam->nxqs != lif->nxqs) {
2987 if (qparam->intr_split) {
2988 set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
2989 } else {
2990 clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
2991 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
2992 lif->tx_coalesce_hw = lif->rx_coalesce_hw;
2993 }
2994
2995 /* Clear existing interrupt assignments. We check for NULL here
2996 * because we're checking the whole array for potential qcqs, not
2997 * just those qcqs that have just been set up.
2998 */
2999 for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
3000 if (lif->txqcqs[i])
3001 ionic_qcq_intr_free(lif, lif->txqcqs[i]);
3002 if (lif->rxqcqs[i])
3003 ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
3004 }
3005
3006 /* re-assign the interrupts */
3007 for (i = 0; i < qparam->nxqs; i++) {
3008 lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
3009 err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
3010 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
3011 lif->rxqcqs[i]->intr.index,
3012 lif->rx_coalesce_hw);
3013
3014 if (qparam->intr_split) {
3015 lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
3016 err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
3017 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
3018 lif->txqcqs[i]->intr.index,
3019 lif->tx_coalesce_hw);
3020 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
3021 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
3022 } else {
3023 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3024 ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
3025 }
3026 }
3027 }
3028
3029 /* now we can rework the debugfs mappings */
3030 if (tx_qcqs) {
3031 for (i = 0; i < qparam->nxqs; i++) {
3032 ionic_debugfs_del_qcq(lif->txqcqs[i]);
3033 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
3034 }
3035 }
3036
3037 if (rx_qcqs) {
3038 for (i = 0; i < qparam->nxqs; i++) {
3039 ionic_debugfs_del_qcq(lif->rxqcqs[i]);
3040 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
3041 }
3042 }
3043
3044 swap(lif->nxqs, qparam->nxqs);
3045 swap(lif->rxq_features, qparam->rxq_features);
3046
3047 err_out_reinit_unlock:
3048 /* re-init the queues, but don't lose an error code */
3049 if (err)
3050 ionic_start_queues_reconfig(lif);
3051 else
3052 err = ionic_start_queues_reconfig(lif);
3053
3054 err_out:
3055 /* free old allocs without cleaning intr */
3056 for (i = 0; i < qparam->nxqs; i++) {
3057 if (tx_qcqs && tx_qcqs[i]) {
3058 tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3059 ionic_qcq_free(lif, tx_qcqs[i]);
3060 devm_kfree(lif->ionic->dev, tx_qcqs[i]);
3061 tx_qcqs[i] = NULL;
3062 }
3063 if (rx_qcqs && rx_qcqs[i]) {
3064 rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3065 ionic_qcq_free(lif, rx_qcqs[i]);
3066 devm_kfree(lif->ionic->dev, rx_qcqs[i]);
3067 rx_qcqs[i] = NULL;
3068 }
3069 }
3070
3071 /* free q array */
3072 if (rx_qcqs) {
3073 devm_kfree(lif->ionic->dev, rx_qcqs);
3074 rx_qcqs = NULL;
3075 }
3076 if (tx_qcqs) {
3077 devm_kfree(lif->ionic->dev, tx_qcqs);
3078 tx_qcqs = NULL;
3079 }
3080
3081 /* clean the unused dma and info allocations when new set is smaller
3082 * than the full array, but leave the qcq shells in place
3083 */
3084 for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
3085 if (lif->txqcqs && lif->txqcqs[i]) {
3086 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3087 ionic_qcq_free(lif, lif->txqcqs[i]);
3088 }
3089
3090 if (lif->rxqcqs && lif->rxqcqs[i]) {
3091 lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3092 ionic_qcq_free(lif, lif->rxqcqs[i]);
3093 }
3094 }
3095
3096 if (err)
3097 netdev_info(lif->netdev, "%s: failed %d\n", __func__, err);
3098
3099 return err;
3100 }
3101
3102 int ionic_lif_alloc(struct ionic *ionic)
3103 {
3104 struct device *dev = ionic->dev;
3105 union ionic_lif_identity *lid;
3106 struct net_device *netdev;
3107 struct ionic_lif *lif;
3108 int tbl_sz;
3109 int err;
3110
3111 lid = kzalloc(sizeof(*lid), GFP_KERNEL);
3112 if (!lid)
3113 return -ENOMEM;
3114
3115 netdev = alloc_etherdev_mqs(sizeof(*lif),
3116 ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
3117 if (!netdev) {
3118 dev_err(dev, "Cannot allocate netdev, aborting\n");
3119 err = -ENOMEM;
3120 goto err_out_free_lid;
3121 }
3122
3123 SET_NETDEV_DEV(netdev, dev);
3124
3125 lif = netdev_priv(netdev);
3126 lif->netdev = netdev;
3127 ionic->lif = lif;
3128 netdev->netdev_ops = &ionic_netdev_ops;
3129 ionic_ethtool_set_ops(netdev);
3130
3131 netdev->watchdog_timeo = 2 * HZ;
3132 netif_carrier_off(netdev);
3133
3134 lif->identity = lid;
3135 lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
3136 err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
3137 if (err) {
3138 dev_err(ionic->dev, "Cannot identify type %d: %d\n",
3139 lif->lif_type, err);
3140 goto err_out_free_netdev;
3141 }
3142 lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
3143 le32_to_cpu(lif->identity->eth.min_frame_size));
3144 lif->netdev->max_mtu =
3145 le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
3146
3147 lif->neqs = ionic->neqs_per_lif;
3148 lif->nxqs = ionic->ntxqs_per_lif;
3149
3150 lif->ionic = ionic;
3151 lif->index = 0;
3152
3153 if (is_kdump_kernel()) {
3154 lif->ntxq_descs = IONIC_MIN_TXRX_DESC;
3155 lif->nrxq_descs = IONIC_MIN_TXRX_DESC;
3156 } else {
3157 lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
3158 lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
3159 }
3160
3161 /* Convert the default coalesce value to actual hw resolution */
3162 lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
3163 lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
3164 lif->rx_coalesce_usecs);
3165 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
3166 lif->tx_coalesce_hw = lif->rx_coalesce_hw;
3167 set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
3168 set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
3169
3170 snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
3171
3172 mutex_init(&lif->queue_lock);
3173 mutex_init(&lif->config_lock);
3174
3175 spin_lock_init(&lif->adminq_lock);
3176
3177 spin_lock_init(&lif->deferred.lock);
3178 INIT_LIST_HEAD(&lif->deferred.list);
3179 INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
3180
3181 /* allocate lif info */
3182 lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
3183 lif->info = dma_alloc_coherent(dev, lif->info_sz,
3184 &lif->info_pa, GFP_KERNEL);
3185 if (!lif->info) {
3186 dev_err(dev, "Failed to allocate lif info, aborting\n");
3187 err = -ENOMEM;
3188 goto err_out_free_mutex;
3189 }
3190
3191 ionic_debugfs_add_lif(lif);
3192
3193 /* allocate control queues and txrx queue arrays */
3194 ionic_lif_queue_identify(lif);
3195 err = ionic_qcqs_alloc(lif);
3196 if (err)
3197 goto err_out_free_lif_info;
3198
3199 /* allocate rss indirection table */
3200 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
3201 lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
3202 lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
3203 &lif->rss_ind_tbl_pa,
3204 GFP_KERNEL);
3205
3206 if (!lif->rss_ind_tbl) {
3207 err = -ENOMEM;
3208 dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
3209 goto err_out_free_qcqs;
3210 }
3211 netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
3212
3213 ionic_lif_alloc_phc(lif);
3214
3215 return 0;
3216
3217 err_out_free_qcqs:
3218 ionic_qcqs_free(lif);
3219 err_out_free_lif_info:
3220 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
3221 lif->info = NULL;
3222 lif->info_pa = 0;
3223 err_out_free_mutex:
3224 mutex_destroy(&lif->config_lock);
3225 mutex_destroy(&lif->queue_lock);
3226 err_out_free_netdev:
3227 free_netdev(lif->netdev);
3228 lif = NULL;
3229 err_out_free_lid:
3230 kfree(lid);
3231
3232 return err;
3233 }
3234
3235 static void ionic_lif_reset(struct ionic_lif *lif)
3236 {
3237 struct ionic_dev *idev = &lif->ionic->idev;
3238
3239 if (!ionic_is_fw_running(idev))
3240 return;
3241
3242 mutex_lock(&lif->ionic->dev_cmd_lock);
3243 ionic_dev_cmd_lif_reset(idev, lif->index);
3244 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3245 mutex_unlock(&lif->ionic->dev_cmd_lock);
3246 }
3247
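/* FW recovery, down half: on a FW_RESET event the LIF is torn all the
 * way down (queues, filters, lif info) while the netdev itself stays
 * registered; the matching fw_up path rebuilds it via
 * ionic_restart_lif().
 */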
3248 static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
3249 {
3250 struct ionic *ionic = lif->ionic;
3251
3252 if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
3253 return;
3254
3255 dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
3256
3257 netif_device_detach(lif->netdev);
3258
3259 mutex_lock(&lif->queue_lock);
3260 if (test_bit(IONIC_LIF_F_UP, lif->state)) {
3261 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
3262 ionic_stop_queues(lif);
3263 }
3264
3265 if (netif_running(lif->netdev)) {
3266 ionic_txrx_deinit(lif);
3267 ionic_txrx_free(lif);
3268 }
3269 ionic_lif_deinit(lif);
3270 ionic_reset(ionic);
3271 ionic_qcqs_free(lif);
3272
3273 mutex_unlock(&lif->queue_lock);
3274
3275 clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
3276 dev_info(ionic->dev, "FW Down: LIFs stopped\n");
3277 }
3278
3279 int ionic_restart_lif(struct ionic_lif *lif)
3280 {
3281 struct ionic *ionic = lif->ionic;
3282 int err;
3283
3284 mutex_lock(&lif->queue_lock);
3285
3286 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
3287 dev_info(ionic->dev, "FW Up: clearing broken state\n");
3288
3289 err = ionic_qcqs_alloc(lif);
3290 if (err)
3291 goto err_unlock;
3292
3293 err = ionic_lif_init(lif);
3294 if (err)
3295 goto err_qcqs_free;
3296
3297 ionic_vf_attr_replay(lif);
3298
3299 if (lif->registered)
3300 ionic_lif_set_netdev_info(lif);
3301
3302 ionic_rx_filter_replay(lif);
3303
3304 if (netif_running(lif->netdev)) {
3305 err = ionic_txrx_alloc(lif);
3306 if (err)
3307 goto err_lifs_deinit;
3308
3309 err = ionic_txrx_init(lif);
3310 if (err)
3311 goto err_txrx_free;
3312 }
3313
3314 mutex_unlock(&lif->queue_lock);
3315
3316 clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
3317 ionic_link_status_check_request(lif, CAN_SLEEP);
3318 netif_device_attach(lif->netdev);
3319
3320 return 0;
3321
3322 err_txrx_free:
3323 ionic_txrx_free(lif);
3324 err_lifs_deinit:
3325 ionic_lif_deinit(lif);
3326 err_qcqs_free:
3327 ionic_qcqs_free(lif);
3328 err_unlock:
3329 mutex_unlock(&lif->queue_lock);
3330
3331 return err;
3332 }
3333
3334 static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
3335 {
3336 struct ionic *ionic = lif->ionic;
3337 int err;
3338
3339 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
3340 return;
3341
3342 dev_info(ionic->dev, "FW Up: restarting LIFs\n");
3343
3344 /* This is a little different from what happens at
3345 * probe time because the LIF already exists so we
3346 * just need to reanimate it.
3347 */
3348 ionic_init_devinfo(ionic);
3349 err = ionic_identify(ionic);
3350 if (err)
3351 goto err_out;
3352 err = ionic_port_identify(ionic);
3353 if (err)
3354 goto err_out;
3355 err = ionic_port_init(ionic);
3356 if (err)
3357 goto err_out;
3358
3359 err = ionic_restart_lif(lif);
3360 if (err)
3361 goto err_out;
3362
3363 dev_info(ionic->dev, "FW Up: LIFs restarted\n");
3364
3365 /* restore the hardware timestamping queues */
3366 ionic_lif_hwstamp_replay(lif);
3367
3368 return;
3369
3370 err_out:
3371 dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
3372 }
3373
3374 void ionic_lif_free(struct ionic_lif *lif)
3375 {
3376 struct device *dev = lif->ionic->dev;
3377
3378 ionic_lif_free_phc(lif);
3379
3380 /* free rss indirection table */
3381 dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
3382 lif->rss_ind_tbl_pa);
3383 lif->rss_ind_tbl = NULL;
3384 lif->rss_ind_tbl_pa = 0;
3385
3386 /* free queues */
3387 ionic_qcqs_free(lif);
3388 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
3389 ionic_lif_reset(lif);
3390
3391 /* free lif info */
3392 kfree(lif->identity);
3393 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
3394 lif->info = NULL;
3395 lif->info_pa = 0;
3396
3397 /* unmap doorbell page */
3398 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
3399 lif->kern_dbpage = NULL;
3400
3401 mutex_destroy(&lif->config_lock);
3402 mutex_destroy(&lif->queue_lock);
3403
3404 /* free netdev & lif */
3405 ionic_debugfs_del_lif(lif);
3406 free_netdev(lif->netdev);
3407 }
3408
3409 void ionic_lif_deinit(struct ionic_lif *lif)
3410 {
3411 if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
3412 return;
3413
3414 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
3415 cancel_work_sync(&lif->deferred.work);
3416 cancel_work_sync(&lif->tx_timeout_work);
3417 ionic_rx_filters_deinit(lif);
3418 if (lif->netdev->features & NETIF_F_RXHASH)
3419 ionic_lif_rss_deinit(lif);
3420 }
3421
3422 napi_disable(&lif->adminqcq->napi);
3423 ionic_lif_qcq_deinit(lif, lif->notifyqcq);
3424 ionic_lif_qcq_deinit(lif, lif->adminqcq);
3425
3426 ionic_lif_reset(lif);
3427 }
3428
3429 static int ionic_lif_adminq_init(struct ionic_lif *lif)
3430 {
3431 struct device *dev = lif->ionic->dev;
3432 struct ionic_q_init_comp comp;
3433 struct ionic_dev *idev;
3434 struct ionic_qcq *qcq;
3435 struct ionic_queue *q;
3436 int err;
3437
3438 idev = &lif->ionic->idev;
3439 qcq = lif->adminqcq;
3440 q = &qcq->q;
3441
3442 mutex_lock(&lif->ionic->dev_cmd_lock);
3443 ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
3444 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3445 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
3446 mutex_unlock(&lif->ionic->dev_cmd_lock);
3447 if (err) {
3448 netdev_err(lif->netdev, "adminq init failed %d\n", err);
3449 return err;
3450 }
3451
3452 q->hw_type = comp.hw_type;
3453 q->hw_index = le32_to_cpu(comp.hw_index);
3454 q->dbval = IONIC_DBELL_QID(q->hw_index);
3455
3456 dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
3457 dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
3458
3459 q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE;
3460 q->dbell_jiffies = jiffies;
3461
3462 netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
3463
3464 qcq->napi_qcq = qcq;
3465 timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
3466
3467 napi_enable(&qcq->napi);
3468
3469 if (qcq->flags & IONIC_QCQ_F_INTR) {
3470 irq_set_affinity_hint(qcq->intr.vector,
3471 &qcq->intr.affinity_mask);
3472 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
3473 IONIC_INTR_MASK_CLEAR);
3474 }
3475
3476 qcq->flags |= IONIC_QCQ_F_INITED;
3477
3478 return 0;
3479 }
3480
3481 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
3482 {
3483 struct ionic_qcq *qcq = lif->notifyqcq;
3484 struct device *dev = lif->ionic->dev;
3485 struct ionic_queue *q = &qcq->q;
3486 int err;
3487
3488 struct ionic_admin_ctx ctx = {
3489 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3490 .cmd.q_init = {
3491 .opcode = IONIC_CMD_Q_INIT,
3492 .lif_index = cpu_to_le16(lif->index),
3493 .type = q->type,
3494 .ver = lif->qtype_info[q->type].version,
3495 .index = cpu_to_le32(q->index),
3496 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
3497 IONIC_QINIT_F_ENA),
3498 .intr_index = cpu_to_le16(lif->adminqcq->intr.index),
3499 .pid = cpu_to_le16(q->pid),
3500 .ring_size = ilog2(q->num_descs),
3501 .ring_base = cpu_to_le64(q->base_pa),
3502 }
3503 };
3504
3505 dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
3506 dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
3507 dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
3508 dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
3509
3510 err = ionic_adminq_post_wait(lif, &ctx);
3511 if (err)
3512 return err;
3513
3514 lif->last_eid = 0;
3515 q->hw_type = ctx.comp.q_init.hw_type;
3516 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
3517 q->dbval = IONIC_DBELL_QID(q->hw_index);
3518
3519 dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
3520 dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
3521
3522 /* preset the callback info */
3523 q->info[0].cb_arg = lif;
3524
3525 qcq->flags |= IONIC_QCQ_F_INITED;
3526
3527 return 0;
3528 }
3529
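/* Establish the station MAC address: use the address the device
 * reports, generate a random one if the device has none, and preserve
 * a netdev address that was set earlier (e.g. before a fw-upgrade
 * reset) by making sure it lands in the filter list.
 */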
3530 static int ionic_station_set(struct ionic_lif *lif)
3531 {
3532 struct net_device *netdev = lif->netdev;
3533 struct ionic_admin_ctx ctx = {
3534 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3535 .cmd.lif_getattr = {
3536 .opcode = IONIC_CMD_LIF_GETATTR,
3537 .index = cpu_to_le16(lif->index),
3538 .attr = IONIC_LIF_ATTR_MAC,
3539 },
3540 };
3541 u8 mac_address[ETH_ALEN];
3542 struct sockaddr addr;
3543 int err;
3544
3545 err = ionic_adminq_post_wait(lif, &ctx);
3546 if (err)
3547 return err;
3548 netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
3549 ctx.comp.lif_getattr.mac);
3550 ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
3551
3552 if (is_zero_ether_addr(mac_address)) {
3553 eth_hw_addr_random(netdev);
3554 netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr);
3555 ether_addr_copy(mac_address, netdev->dev_addr);
3556
3557 err = ionic_program_mac(lif, mac_address);
3558 if (err < 0)
3559 return err;
3560
3561 if (err > 0) {
3562 netdev_dbg(netdev, "%s:SET/GET ATTR Mac are not same-due to old FW running\n",
3563 __func__);
3564 return 0;
3565 }
3566 }
3567
3568 if (!is_zero_ether_addr(netdev->dev_addr)) {
3569 /* If the netdev mac is non-zero and doesn't match the default
3570 * device address, it was set by something earlier and we're
3571 * likely here again after a fw-upgrade reset. We need to be
3572 * sure the netdev mac is in our filter list.
3573 */
3574 if (!ether_addr_equal(mac_address, netdev->dev_addr))
3575 ionic_lif_addr_add(lif, netdev->dev_addr);
3576 } else {
3577 /* Update the netdev mac with the device's mac */
3578 ether_addr_copy(addr.sa_data, mac_address);
3579 addr.sa_family = AF_INET;
3580 err = eth_prepare_mac_addr_change(netdev, &addr);
3581 if (err) {
3582 netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
3583 addr.sa_data, err);
3584 return 0;
3585 }
3586
3587 eth_commit_mac_addr_change(netdev, &addr);
3588 }
3589
3590 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
3591 netdev->dev_addr);
3592 ionic_lif_addr_add(lif, netdev->dev_addr);
3593
3594 return 0;
3595 }
3596
3597 int ionic_lif_init(struct ionic_lif *lif)
3598 {
3599 struct ionic_dev *idev = &lif->ionic->idev;
3600 struct device *dev = lif->ionic->dev;
3601 struct ionic_lif_init_comp comp;
3602 int dbpage_num;
3603 int err;
3604
3605 mutex_lock(&lif->ionic->dev_cmd_lock);
3606 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
3607 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3608 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
3609 mutex_unlock(&lif->ionic->dev_cmd_lock);
3610 if (err)
3611 return err;
3612
3613 lif->hw_index = le16_to_cpu(comp.hw_index);
3614
3615 /* now that we have the hw_index we can figure out our doorbell page */
3616 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
3617 if (!lif->dbid_count) {
3618 dev_err(dev, "No doorbell pages, aborting\n");
3619 return -EINVAL;
3620 }
3621
3622 lif->kern_pid = 0;
3623 dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
3624 lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
3625 if (!lif->kern_dbpage) {
3626 dev_err(dev, "Cannot map dbpage, aborting\n");
3627 return -ENOMEM;
3628 }
3629
3630 err = ionic_lif_adminq_init(lif);
3631 if (err)
3632 goto err_out_adminq_deinit;
3633
3634 if (lif->ionic->nnqs_per_lif) {
3635 err = ionic_lif_notifyq_init(lif);
3636 if (err)
3637 goto err_out_notifyq_deinit;
3638 }
3639
3640 err = ionic_init_nic_features(lif);
3641 if (err)
3642 goto err_out_notifyq_deinit;
3643
3644 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
3645 err = ionic_rx_filters_init(lif);
3646 if (err)
3647 goto err_out_notifyq_deinit;
3648 }
3649
3650 err = ionic_station_set(lif);
3651 if (err)
3652 goto err_out_notifyq_deinit;
3653
3654 lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
3655
3656 set_bit(IONIC_LIF_F_INITED, lif->state);
3657
3658 INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
3659
3660 return 0;
3661
3662 err_out_notifyq_deinit:
3663 napi_disable(&lif->adminqcq->napi);
3664 ionic_lif_qcq_deinit(lif, lif->notifyqcq);
3665 err_out_adminq_deinit:
3666 ionic_lif_qcq_deinit(lif, lif->adminqcq);
3667 ionic_lif_reset(lif);
3668 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
3669 lif->kern_dbpage = NULL;
3670
3671 return err;
3672 }
3673
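
/* Intentionally empty; apparently kept so nb_work is a valid work
 * struct that ionic_lif_unregister() can cancel_work_sync() safely.
 */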
static void ionic_lif_notify_work(struct work_struct *ws)
{
}
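
/* Push the current netdev name to the device with a
 * LIF_SETATTR(NAME) adminq command.
 */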
static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
		sizeof(ctx.cmd.lif_setattr.name));

	ionic_adminq_post_wait(lif, &ctx);
}
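
/* Map a netdev back to its ionic_lif, or return NULL if the netdev
 * isn't one of ours (identified by its ndo_start_xmit op).
 */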
static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
{
	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
		return NULL;

	return netdev_priv(netdev);
}
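
/* Netdevice notifier callback: when one of our netdevs is renamed,
 * forward the new name to the device.
 */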
static int ionic_lif_notify(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(info);
	struct ionic *ionic = container_of(nb, struct ionic, nb);
	struct ionic_lif *lif = ionic_netdev_lif(ndev);

	if (!lif || lif->ionic != ionic)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGENAME:
		ionic_lif_set_netdev_info(lif);
		break;
	}

	return NOTIFY_DONE;
}
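
/* Register the PHC, the netdevice notifier, and the netdev itself,
 * then request an initial link status check and push the netdev
 * name to the device.
 */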
int ionic_lif_register(struct ionic_lif *lif)
{
	int err;

	ionic_lif_register_phc(lif);

	INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);

	lif->ionic->nb.notifier_call = ionic_lif_notify;

	err = register_netdevice_notifier(&lif->ionic->nb);
	if (err)
		lif->ionic->nb.notifier_call = NULL;

	/* only register LIF0 for now */
	err = register_netdev(lif->netdev);
	if (err) {
		dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
		ionic_lif_unregister_phc(lif);
		return err;
	}

	ionic_link_status_check_request(lif, CAN_SLEEP);
	lif->registered = true;
	ionic_lif_set_netdev_info(lif);

	return 0;
}
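
/* Tear down in reverse of ionic_lif_register(): notifier first,
 * then netdev, then PHC.
 */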
void ionic_lif_unregister(struct ionic_lif *lif)
{
	if (lif->ionic->nb.notifier_call) {
		unregister_netdevice_notifier(&lif->ionic->nb);
		cancel_work_sync(&lif->ionic->nb_work);
		lif->ionic->nb.notifier_call = NULL;
	}

	if (lif->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(lif->netdev);

	ionic_lif_unregister_phc(lif);

	lif->registered = false;
}
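
/* Query the FW for each known queue type's version, supported
 * features, and descriptor sizes, caching the results in
 * lif->qtype_info[] for use when the queues are allocated.
 */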
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
	union ionic_q_identity __iomem *q_ident;
	struct ionic *ionic = lif->ionic;
	struct ionic_dev *idev;
	int qtype;
	int err;

	idev = &lif->ionic->idev;
	q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;

	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* only query the queue types we know about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
					     ionic_qtype_versions[qtype]);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		if (!err) {
			qti->version = readb(&q_ident->version);
			qti->supported = readb(&q_ident->supported);
			qti->features = readq(&q_ident->features);
			qti->desc_sz = readw(&q_ident->desc_sz);
			qti->comp_sz = readw(&q_ident->comp_sz);
			qti->sg_desc_sz = readw(&q_ident->sg_desc_sz);
			qti->max_sg_elems = readw(&q_ident->max_sg_elems);
			qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
		}
		mutex_unlock(&ionic->dev_cmd_lock);

		if (err == -EINVAL) {
			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
			continue;
		} else if (err == -EIO) {
			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
			return;
		} else if (err) {
			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
				qtype, err);
			return;
		}

		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
			qtype, qti->version);
		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
			qtype, qti->supported);
		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
			qtype, qti->features);
		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
			qtype, qti->desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
			qtype, qti->comp_sz);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
			qtype, qti->sg_desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
			qtype, qti->max_sg_elems);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
			qtype, qti->sg_desc_stride);
	}
}
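
/* Read the LIF-level identity (capabilities, filter limits, queue
 * counts, MAC, MTU) from the FW into *lid.
 */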
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
		       union ionic_lif_identity *lid)
{
	struct ionic_dev *idev = &ionic->idev;
	size_t sz;
	int err;

	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err)
		return err;

	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
		le64_to_cpu(lid->capabilities));

	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
		le32_to_cpu(lid->eth.max_ucast_filters));
	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
		le32_to_cpu(lid->eth.max_mcast_filters));
	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
		le64_to_cpu(lid->eth.config.features));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
		le32_to_cpu(lid->eth.config.mtu));

	return 0;
}
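
/* Size the queue and interrupt resources: start from the FW-reported
 * limits (trimmed for kdump and, if enabled, a reserved hw-timestamp
 * queue pair), then repeatedly halve notifyq, EQ, and TxRx pair
 * counts until the OS grants enough IRQ vectors.  For example, a
 * request for 1 + 16 + 8 = 25 vectors that fails retries at
 * 1 + 16 + 4, and so on, down to the 2-vector minimum.
 */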
int ionic_lif_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	/* retrieve basic values from FW */
	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	/* limit values to play nice with kdump */
	if (is_kdump_kernel()) {
		dev_nintrs = 2;
		neqs_per_lif = 0;
		nnqs_per_lif = 0;
		ntxqs_per_lif = 1;
		nrxqs_per_lif = 1;
	}

	/* reserve last queue id for hardware timestamping */
	if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) {
		if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) {
			lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP);
		} else {
			ntxqs_per_lif -= 1;
			nrxqs_per_lif -= 1;
		}
	}

	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 * 1 for master lif adminq/notifyq
	 * 1 for each CPU for master lif TxRx queue pairs
	 * whatever's left is for RDMA queues
	 */
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2; /* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	if (err == -ENOSPC)
		goto try_fewer;

	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

try_fewer:
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}