1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3 
4 #include <linux/printk.h>
5 #include <linux/dynamic_debug.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/rtnetlink.h>
9 #include <linux/interrupt.h>
10 #include <linux/pci.h>
11 #include <linux/cpumask.h>
12 
13 #include "ionic.h"
14 #include "ionic_bus.h"
15 #include "ionic_lif.h"
16 #include "ionic_txrx.h"
17 #include "ionic_ethtool.h"
18 #include "ionic_debugfs.h"
19 
20 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
21 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
22 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
23 static void ionic_link_status_check(struct ionic_lif *lif);
24 
25 static void ionic_lif_deferred_work(struct work_struct *work)
26 {
27 	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
28 	struct ionic_deferred *def = &lif->deferred;
29 	struct ionic_deferred_work *w = NULL;
30 
31 	spin_lock_bh(&def->lock);
32 	if (!list_empty(&def->list)) {
33 		w = list_first_entry(&def->list,
34 				     struct ionic_deferred_work, list);
35 		list_del(&w->list);
36 	}
37 	spin_unlock_bh(&def->lock);
38 
39 	if (w) {
40 		switch (w->type) {
41 		case IONIC_DW_TYPE_RX_MODE:
42 			ionic_lif_rx_mode(lif, w->rx_mode);
43 			break;
44 		case IONIC_DW_TYPE_RX_ADDR_ADD:
45 			ionic_lif_addr_add(lif, w->addr);
46 			break;
47 		case IONIC_DW_TYPE_RX_ADDR_DEL:
48 			ionic_lif_addr_del(lif, w->addr);
49 			break;
50 		case IONIC_DW_TYPE_LINK_STATUS:
51 			ionic_link_status_check(lif);
52 			break;
53 		default:
54 			break;
55 		}
56 		kfree(w);
57 		schedule_work(&def->work);
58 	}
59 }
60 
61 static void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
62 				       struct ionic_deferred_work *work)
63 {
64 	spin_lock_bh(&def->lock);
65 	list_add_tail(&work->list, &def->list);
66 	spin_unlock_bh(&def->lock);
67 	schedule_work(&def->work);
68 }
69 
70 static void ionic_link_status_check(struct ionic_lif *lif)
71 {
72 	struct net_device *netdev = lif->netdev;
73 	u16 link_status;
74 	bool link_up;
75 
76 	link_status = le16_to_cpu(lif->info->status.link_status);
77 	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
78 
79 	/* filter out the no-change cases */
80 	if (link_up == netif_carrier_ok(netdev))
81 		goto link_out;
82 
83 	if (link_up) {
84 		netdev_info(netdev, "Link up - %d Gbps\n",
85 			    le32_to_cpu(lif->info->status.link_speed) / 1000);
86 
87 		if (test_bit(IONIC_LIF_F_UP, lif->state)) {
88 			netif_tx_wake_all_queues(lif->netdev);
89 			netif_carrier_on(netdev);
90 		}
91 	} else {
92 		netdev_info(netdev, "Link down\n");
93 
94 		/* carrier off first to avoid watchdog timeout */
95 		netif_carrier_off(netdev);
96 		if (test_bit(IONIC_LIF_F_UP, lif->state))
97 			netif_tx_stop_all_queues(netdev);
98 	}
99 
100 link_out:
101 	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
102 }
103 
104 static void ionic_link_status_check_request(struct ionic_lif *lif)
105 {
106 	struct ionic_deferred_work *work;
107 
108 	/* we only need one request outstanding at a time */
109 	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
110 		return;
111 
112 	if (in_interrupt()) {
113 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
114 		if (!work)
115 			return;
116 
117 		work->type = IONIC_DW_TYPE_LINK_STATUS;
118 		ionic_lif_deferred_enqueue(&lif->deferred, work);
119 	} else {
120 		ionic_link_status_check(lif);
121 	}
122 }
123 
124 static irqreturn_t ionic_isr(int irq, void *data)
125 {
126 	struct napi_struct *napi = data;
127 
128 	napi_schedule_irqoff(napi);
129 
130 	return IRQ_HANDLED;
131 }
132 
133 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
134 {
135 	struct ionic_intr_info *intr = &qcq->intr;
136 	struct device *dev = lif->ionic->dev;
137 	struct ionic_queue *q = &qcq->q;
138 	const char *name;
139 
140 	if (lif->registered)
141 		name = lif->netdev->name;
142 	else
143 		name = dev_name(dev);
144 
145 	snprintf(intr->name, sizeof(intr->name),
146 		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
147 
148 	return devm_request_irq(dev, intr->vector, ionic_isr,
149 				0, intr->name, &qcq->napi);
150 }
151 
152 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
153 {
154 	struct ionic *ionic = lif->ionic;
155 	int index;
156 
157 	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
158 	if (index == ionic->nintrs) {
159 		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
160 			    __func__, index, ionic->nintrs);
161 		return -ENOSPC;
162 	}
163 
164 	set_bit(index, ionic->intrs);
165 	ionic_intr_init(&ionic->idev, intr, index);
166 
167 	return 0;
168 }
169 
170 static void ionic_intr_free(struct ionic_lif *lif, int index)
171 {
172 	if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
173 		clear_bit(index, lif->ionic->intrs);
174 }
175 
176 static int ionic_qcq_enable(struct ionic_qcq *qcq)
177 {
178 	struct ionic_queue *q = &qcq->q;
179 	struct ionic_lif *lif = q->lif;
180 	struct ionic_dev *idev;
181 	struct device *dev;
182 
183 	struct ionic_admin_ctx ctx = {
184 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
185 		.cmd.q_control = {
186 			.opcode = IONIC_CMD_Q_CONTROL,
187 			.lif_index = cpu_to_le16(lif->index),
188 			.type = q->type,
189 			.index = cpu_to_le32(q->index),
190 			.oper = IONIC_Q_ENABLE,
191 		},
192 	};
193 
194 	idev = &lif->ionic->idev;
195 	dev = lif->ionic->dev;
196 
197 	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
198 		ctx.cmd.q_control.index, ctx.cmd.q_control.type);
199 
200 	if (qcq->flags & IONIC_QCQ_F_INTR) {
201 		irq_set_affinity_hint(qcq->intr.vector,
202 				      &qcq->intr.affinity_mask);
203 		napi_enable(&qcq->napi);
204 		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
205 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
206 				IONIC_INTR_MASK_CLEAR);
207 	}
208 
209 	return ionic_adminq_post_wait(lif, &ctx);
210 }
211 
212 static int ionic_qcq_disable(struct ionic_qcq *qcq)
213 {
214 	struct ionic_queue *q = &qcq->q;
215 	struct ionic_lif *lif = q->lif;
216 	struct ionic_dev *idev;
217 	struct device *dev;
218 
219 	struct ionic_admin_ctx ctx = {
220 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
221 		.cmd.q_control = {
222 			.opcode = IONIC_CMD_Q_CONTROL,
223 			.lif_index = cpu_to_le16(lif->index),
224 			.type = q->type,
225 			.index = cpu_to_le32(q->index),
226 			.oper = IONIC_Q_DISABLE,
227 		},
228 	};
229 
230 	idev = &lif->ionic->idev;
231 	dev = lif->ionic->dev;
232 
233 	dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
234 		ctx.cmd.q_control.index, ctx.cmd.q_control.type);
235 
236 	if (qcq->flags & IONIC_QCQ_F_INTR) {
237 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
238 				IONIC_INTR_MASK_SET);
239 		synchronize_irq(qcq->intr.vector);
240 		irq_set_affinity_hint(qcq->intr.vector, NULL);
241 		napi_disable(&qcq->napi);
242 	}
243 
244 	return ionic_adminq_post_wait(lif, &ctx);
245 }
246 
247 static void ionic_lif_quiesce(struct ionic_lif *lif)
248 {
249 	struct ionic_admin_ctx ctx = {
250 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
251 		.cmd.lif_setattr = {
252 			.opcode = IONIC_CMD_LIF_SETATTR,
253 			.attr = IONIC_LIF_ATTR_STATE,
254 			.index = lif->index,
255 			.state = IONIC_LIF_DISABLE
256 		},
257 	};
258 
259 	ionic_adminq_post_wait(lif, &ctx);
260 }
261 
262 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
263 {
264 	struct ionic_dev *idev = &lif->ionic->idev;
265 	struct device *dev = lif->ionic->dev;
266 
267 	if (!qcq)
268 		return;
269 
270 	ionic_debugfs_del_qcq(qcq);
271 
272 	if (!(qcq->flags & IONIC_QCQ_F_INITED))
273 		return;
274 
275 	if (qcq->flags & IONIC_QCQ_F_INTR) {
276 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
277 				IONIC_INTR_MASK_SET);
278 		devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
279 		netif_napi_del(&qcq->napi);
280 	}
281 
282 	qcq->flags &= ~IONIC_QCQ_F_INITED;
283 }
284 
285 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
286 {
287 	struct device *dev = lif->ionic->dev;
288 
289 	if (!qcq)
290 		return;
291 
292 	dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
293 	qcq->base = NULL;
294 	qcq->base_pa = 0;
295 
296 	if (qcq->flags & IONIC_QCQ_F_INTR)
297 		ionic_intr_free(lif, qcq->intr.index);
298 
299 	devm_kfree(dev, qcq->cq.info);
300 	qcq->cq.info = NULL;
301 	devm_kfree(dev, qcq->q.info);
302 	qcq->q.info = NULL;
303 	devm_kfree(dev, qcq);
304 }
305 
306 static void ionic_qcqs_free(struct ionic_lif *lif)
307 {
308 	struct device *dev = lif->ionic->dev;
309 	unsigned int i;
310 
311 	if (lif->notifyqcq) {
312 		ionic_qcq_free(lif, lif->notifyqcq);
313 		lif->notifyqcq = NULL;
314 	}
315 
316 	if (lif->adminqcq) {
317 		ionic_qcq_free(lif, lif->adminqcq);
318 		lif->adminqcq = NULL;
319 	}
320 
321 	for (i = 0; i < lif->nxqs; i++)
322 		if (lif->rxqcqs[i].stats)
323 			devm_kfree(dev, lif->rxqcqs[i].stats);
324 
325 	devm_kfree(dev, lif->rxqcqs);
326 	lif->rxqcqs = NULL;
327 
328 	for (i = 0; i < lif->nxqs; i++)
329 		if (lif->txqcqs[i].stats)
330 			devm_kfree(dev, lif->txqcqs[i].stats);
331 
332 	devm_kfree(dev, lif->txqcqs);
333 	lif->txqcqs = NULL;
334 }
335 
336 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
337 				      struct ionic_qcq *n_qcq)
338 {
339 	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
340 		ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index);
341 		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
342 	}
343 
344 	n_qcq->intr.vector = src_qcq->intr.vector;
345 	n_qcq->intr.index = src_qcq->intr.index;
346 }
347 
348 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
349 			   unsigned int index,
350 			   const char *name, unsigned int flags,
351 			   unsigned int num_descs, unsigned int desc_size,
352 			   unsigned int cq_desc_size,
353 			   unsigned int sg_desc_size,
354 			   unsigned int pid, struct ionic_qcq **qcq)
355 {
356 	struct ionic_dev *idev = &lif->ionic->idev;
357 	u32 q_size, cq_size, sg_size, total_size;
358 	struct device *dev = lif->ionic->dev;
359 	void *q_base, *cq_base, *sg_base;
360 	dma_addr_t cq_base_pa = 0;
361 	dma_addr_t sg_base_pa = 0;
362 	dma_addr_t q_base_pa = 0;
363 	struct ionic_qcq *new;
364 	int err;
365 
366 	*qcq = NULL;
367 
368 	q_size  = num_descs * desc_size;
369 	cq_size = num_descs * cq_desc_size;
370 	sg_size = num_descs * sg_desc_size;
371 
372 	total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
373 	/* Note: aligning q_size/cq_size is not enough due to cq_base
374 	 * address aligning as q_base could be not aligned to the page.
375 	 * Adding PAGE_SIZE.
376 	 */
377 	total_size += PAGE_SIZE;
378 	if (flags & IONIC_QCQ_F_SG) {
379 		total_size += ALIGN(sg_size, PAGE_SIZE);
380 		total_size += PAGE_SIZE;
381 	}
382 
383 	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
384 	if (!new) {
385 		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
386 		err = -ENOMEM;
387 		goto err_out;
388 	}
389 
390 	new->flags = flags;
391 
392 	new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
393 				   GFP_KERNEL);
394 	if (!new->q.info) {
395 		netdev_err(lif->netdev, "Cannot allocate queue info\n");
396 		err = -ENOMEM;
397 		goto err_out;
398 	}
399 
400 	new->q.type = type;
401 
402 	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
403 			   desc_size, sg_desc_size, pid);
404 	if (err) {
405 		netdev_err(lif->netdev, "Cannot initialize queue\n");
406 		goto err_out;
407 	}
408 
409 	if (flags & IONIC_QCQ_F_INTR) {
410 		err = ionic_intr_alloc(lif, &new->intr);
411 		if (err) {
412 			netdev_warn(lif->netdev, "no intr for %s: %d\n",
413 				    name, err);
414 			goto err_out;
415 		}
416 
417 		err = ionic_bus_get_irq(lif->ionic, new->intr.index);
418 		if (err < 0) {
419 			netdev_warn(lif->netdev, "no vector for %s: %d\n",
420 				    name, err);
421 			goto err_out_free_intr;
422 		}
423 		new->intr.vector = err;
424 		ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
425 				       IONIC_INTR_MASK_SET);
426 
427 		new->intr.cpu = cpumask_local_spread(new->intr.index,
428 						     dev_to_node(dev));
429 		if (new->intr.cpu != -1)
430 			cpumask_set_cpu(new->intr.cpu,
431 					&new->intr.affinity_mask);
432 	} else {
433 		new->intr.index = INTR_INDEX_NOT_ASSIGNED;
434 	}
435 
436 	new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
437 				    GFP_KERNEL);
438 	if (!new->cq.info) {
439 		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
440 		err = -ENOMEM;
441 		goto err_out_free_intr;
442 	}
443 
444 	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
445 	if (err) {
446 		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
447 		goto err_out_free_intr;
448 	}
449 
450 	new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
451 				       GFP_KERNEL);
452 	if (!new->base) {
453 		netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
454 		err = -ENOMEM;
455 		goto err_out_free_intr;
456 	}
457 
458 	new->total_size = total_size;
459 
460 	q_base = new->base;
461 	q_base_pa = new->base_pa;
462 
463 	cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
464 	cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);
465 
466 	if (flags & IONIC_QCQ_F_SG) {
467 		sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
468 					PAGE_SIZE);
469 		sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
470 		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
471 	}
472 
473 	ionic_q_map(&new->q, q_base, q_base_pa);
474 	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
475 	ionic_cq_bind(&new->cq, &new->q);
476 
477 	*qcq = new;
478 
479 	return 0;
480 
481 err_out_free_intr:
482 	ionic_intr_free(lif, new->intr.index);
483 err_out:
484 	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
485 	return err;
486 }
487 
488 static int ionic_qcqs_alloc(struct ionic_lif *lif)
489 {
490 	struct device *dev = lif->ionic->dev;
491 	unsigned int q_list_size;
492 	unsigned int flags;
493 	int err;
494 	int i;
495 
496 	flags = IONIC_QCQ_F_INTR;
497 	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
498 			      IONIC_ADMINQ_LENGTH,
499 			      sizeof(struct ionic_admin_cmd),
500 			      sizeof(struct ionic_admin_comp),
501 			      0, lif->kern_pid, &lif->adminqcq);
502 	if (err)
503 		return err;
504 
505 	if (lif->ionic->nnqs_per_lif) {
506 		flags = IONIC_QCQ_F_NOTIFYQ;
507 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
508 				      flags, IONIC_NOTIFYQ_LENGTH,
509 				      sizeof(struct ionic_notifyq_cmd),
510 				      sizeof(union ionic_notifyq_comp),
511 				      0, lif->kern_pid, &lif->notifyqcq);
512 		if (err)
513 			goto err_out_free_adminqcq;
514 
515 		/* Let the notifyq ride on the adminq interrupt */
516 		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
517 	}
518 
519 	q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
520 	err = -ENOMEM;
521 	lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
522 	if (!lif->txqcqs)
523 		goto err_out_free_notifyqcq;
524 	for (i = 0; i < lif->nxqs; i++) {
525 		lif->txqcqs[i].stats = devm_kzalloc(dev,
526 						    sizeof(struct ionic_q_stats),
527 						    GFP_KERNEL);
528 		if (!lif->txqcqs[i].stats)
529 			goto err_out_free_tx_stats;
530 	}
531 
532 	lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
533 	if (!lif->rxqcqs)
534 		goto err_out_free_tx_stats;
535 	for (i = 0; i < lif->nxqs; i++) {
536 		lif->rxqcqs[i].stats = devm_kzalloc(dev,
537 						    sizeof(struct ionic_q_stats),
538 						    GFP_KERNEL);
539 		if (!lif->rxqcqs[i].stats)
540 			goto err_out_free_rx_stats;
541 	}
542 
543 	return 0;
544 
545 err_out_free_rx_stats:
546 	for (i = 0; i < lif->nxqs; i++)
547 		if (lif->rxqcqs[i].stats)
548 			devm_kfree(dev, lif->rxqcqs[i].stats);
549 	devm_kfree(dev, lif->rxqcqs);
550 	lif->rxqcqs = NULL;
551 err_out_free_tx_stats:
552 	for (i = 0; i < lif->nxqs; i++)
553 		if (lif->txqcqs[i].stats)
554 			devm_kfree(dev, lif->txqcqs[i].stats);
555 	devm_kfree(dev, lif->txqcqs);
556 	lif->txqcqs = NULL;
557 err_out_free_notifyqcq:
558 	if (lif->notifyqcq) {
559 		ionic_qcq_free(lif, lif->notifyqcq);
560 		lif->notifyqcq = NULL;
561 	}
562 err_out_free_adminqcq:
563 	ionic_qcq_free(lif, lif->adminqcq);
564 	lif->adminqcq = NULL;
565 
566 	return err;
567 }
568 
569 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
570 {
571 	struct device *dev = lif->ionic->dev;
572 	struct ionic_queue *q = &qcq->q;
573 	struct ionic_cq *cq = &qcq->cq;
574 	struct ionic_admin_ctx ctx = {
575 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
576 		.cmd.q_init = {
577 			.opcode = IONIC_CMD_Q_INIT,
578 			.lif_index = cpu_to_le16(lif->index),
579 			.type = q->type,
580 			.index = cpu_to_le32(q->index),
581 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
582 					     IONIC_QINIT_F_SG),
583 			.intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
584 			.pid = cpu_to_le16(q->pid),
585 			.ring_size = ilog2(q->num_descs),
586 			.ring_base = cpu_to_le64(q->base_pa),
587 			.cq_ring_base = cpu_to_le64(cq->base_pa),
588 			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
589 		},
590 	};
591 	int err;
592 
593 	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
594 	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
595 	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
596 	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
597 
598 	err = ionic_adminq_post_wait(lif, &ctx);
599 	if (err)
600 		return err;
601 
602 	q->hw_type = ctx.comp.q_init.hw_type;
603 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
604 	q->dbval = IONIC_DBELL_QID(q->hw_index);
605 
606 	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
607 	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
608 
609 	qcq->flags |= IONIC_QCQ_F_INITED;
610 
611 	ionic_debugfs_add_qcq(lif, qcq);
612 
613 	return 0;
614 }
615 
616 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
617 {
618 	struct device *dev = lif->ionic->dev;
619 	struct ionic_queue *q = &qcq->q;
620 	struct ionic_cq *cq = &qcq->cq;
621 	struct ionic_admin_ctx ctx = {
622 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
623 		.cmd.q_init = {
624 			.opcode = IONIC_CMD_Q_INIT,
625 			.lif_index = cpu_to_le16(lif->index),
626 			.type = q->type,
627 			.index = cpu_to_le32(q->index),
628 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
629 					     IONIC_QINIT_F_SG),
630 			.intr_index = cpu_to_le16(cq->bound_intr->index),
631 			.pid = cpu_to_le16(q->pid),
632 			.ring_size = ilog2(q->num_descs),
633 			.ring_base = cpu_to_le64(q->base_pa),
634 			.cq_ring_base = cpu_to_le64(cq->base_pa),
635 			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
636 		},
637 	};
638 	int err;
639 
640 	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
641 	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
642 	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
643 	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
644 
645 	err = ionic_adminq_post_wait(lif, &ctx);
646 	if (err)
647 		return err;
648 
649 	q->hw_type = ctx.comp.q_init.hw_type;
650 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
651 	q->dbval = IONIC_DBELL_QID(q->hw_index);
652 
653 	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
654 	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
655 
656 	netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
657 		       NAPI_POLL_WEIGHT);
658 
659 	err = ionic_request_irq(lif, qcq);
660 	if (err) {
661 		netif_napi_del(&qcq->napi);
662 		return err;
663 	}
664 
665 	qcq->flags |= IONIC_QCQ_F_INITED;
666 
667 	ionic_debugfs_add_qcq(lif, qcq);
668 
669 	return 0;
670 }
671 
672 static bool ionic_notifyq_service(struct ionic_cq *cq,
673 				  struct ionic_cq_info *cq_info)
674 {
675 	union ionic_notifyq_comp *comp = cq_info->cq_desc;
676 	struct net_device *netdev;
677 	struct ionic_queue *q;
678 	struct ionic_lif *lif;
679 	u64 eid;
680 
681 	q = cq->bound_q;
682 	lif = q->info[0].cb_arg;
683 	netdev = lif->netdev;
684 	eid = le64_to_cpu(comp->event.eid);
685 
686 	/* Have we run out of new completions to process? */
687 	if (eid <= lif->last_eid)
688 		return false;
689 
690 	lif->last_eid = eid;
691 
692 	dev_dbg(lif->ionic->dev, "notifyq event:\n");
693 	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
694 			 comp, sizeof(*comp), true);
695 
696 	switch (le16_to_cpu(comp->event.ecode)) {
697 	case IONIC_EVENT_LINK_CHANGE:
698 		ionic_link_status_check_request(lif);
699 		break;
700 	case IONIC_EVENT_RESET:
701 		netdev_info(netdev, "Notifyq IONIC_EVENT_RESET eid=%lld\n",
702 			    eid);
703 		netdev_info(netdev, "  reset_code=%d state=%d\n",
704 			    comp->reset.reset_code,
705 			    comp->reset.state);
706 		break;
707 	default:
708 		netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n",
709 			    comp->event.ecode, eid);
710 		break;
711 	}
712 
713 	return true;
714 }
715 
716 static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
717 {
718 	struct ionic_dev *idev = &lif->ionic->idev;
719 	struct ionic_cq *cq = &lif->notifyqcq->cq;
720 	u32 work_done;
721 
722 	work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
723 				     NULL, NULL);
724 	if (work_done)
725 		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
726 				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
727 
728 	return work_done;
729 }
730 
731 static bool ionic_adminq_service(struct ionic_cq *cq,
732 				 struct ionic_cq_info *cq_info)
733 {
734 	struct ionic_admin_comp *comp = cq_info->cq_desc;
735 
736 	if (!color_match(comp->color, cq->done_color))
737 		return false;
738 
739 	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
740 
741 	return true;
742 }
743 
744 static int ionic_adminq_napi(struct napi_struct *napi, int budget)
745 {
746 	struct ionic_lif *lif = napi_to_cq(napi)->lif;
747 	int n_work = 0;
748 	int a_work = 0;
749 
750 	if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
751 		n_work = ionic_notifyq_clean(lif, budget);
752 	a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);
753 
754 	return max(n_work, a_work);
755 }
756 
757 static void ionic_get_stats64(struct net_device *netdev,
758 			      struct rtnl_link_stats64 *ns)
759 {
760 	struct ionic_lif *lif = netdev_priv(netdev);
761 	struct ionic_lif_stats *ls;
762 
763 	memset(ns, 0, sizeof(*ns));
764 	ls = &lif->info->stats;
765 
766 	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
767 			 le64_to_cpu(ls->rx_mcast_packets) +
768 			 le64_to_cpu(ls->rx_bcast_packets);
769 
770 	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
771 			 le64_to_cpu(ls->tx_mcast_packets) +
772 			 le64_to_cpu(ls->tx_bcast_packets);
773 
774 	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
775 		       le64_to_cpu(ls->rx_mcast_bytes) +
776 		       le64_to_cpu(ls->rx_bcast_bytes);
777 
778 	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
779 		       le64_to_cpu(ls->tx_mcast_bytes) +
780 		       le64_to_cpu(ls->tx_bcast_bytes);
781 
782 	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
783 			 le64_to_cpu(ls->rx_mcast_drop_packets) +
784 			 le64_to_cpu(ls->rx_bcast_drop_packets);
785 
786 	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
787 			 le64_to_cpu(ls->tx_mcast_drop_packets) +
788 			 le64_to_cpu(ls->tx_bcast_drop_packets);
789 
790 	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
791 
792 	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
793 
794 	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
795 			       le64_to_cpu(ls->rx_queue_disabled) +
796 			       le64_to_cpu(ls->rx_desc_fetch_error) +
797 			       le64_to_cpu(ls->rx_desc_data_error);
798 
799 	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
800 				le64_to_cpu(ls->tx_queue_disabled) +
801 				le64_to_cpu(ls->tx_desc_fetch_error) +
802 				le64_to_cpu(ls->tx_desc_data_error);
803 
804 	ns->rx_errors = ns->rx_over_errors +
805 			ns->rx_missed_errors;
806 
807 	ns->tx_errors = ns->tx_aborted_errors;
808 }
809 
810 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
811 {
812 	struct ionic_admin_ctx ctx = {
813 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
814 		.cmd.rx_filter_add = {
815 			.opcode = IONIC_CMD_RX_FILTER_ADD,
816 			.lif_index = cpu_to_le16(lif->index),
817 			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
818 		},
819 	};
820 	struct ionic_rx_filter *f;
821 	int err;
822 
823 	/* don't bother if we already have it */
824 	spin_lock_bh(&lif->rx_filters.lock);
825 	f = ionic_rx_filter_by_addr(lif, addr);
826 	spin_unlock_bh(&lif->rx_filters.lock);
827 	if (f)
828 		return 0;
829 
830 	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
831 		   ctx.comp.rx_filter_add.filter_id);
832 
833 	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
834 	err = ionic_adminq_post_wait(lif, &ctx);
835 	if (err)
836 		return err;
837 
838 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
839 }
840 
841 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
842 {
843 	struct ionic_admin_ctx ctx = {
844 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
845 		.cmd.rx_filter_del = {
846 			.opcode = IONIC_CMD_RX_FILTER_DEL,
847 			.lif_index = cpu_to_le16(lif->index),
848 		},
849 	};
850 	struct ionic_rx_filter *f;
851 	int err;
852 
853 	spin_lock_bh(&lif->rx_filters.lock);
854 	f = ionic_rx_filter_by_addr(lif, addr);
855 	if (!f) {
856 		spin_unlock_bh(&lif->rx_filters.lock);
857 		return -ENOENT;
858 	}
859 
860 	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
861 	ionic_rx_filter_free(lif, f);
862 	spin_unlock_bh(&lif->rx_filters.lock);
863 
864 	err = ionic_adminq_post_wait(lif, &ctx);
865 	if (err)
866 		return err;
867 
868 	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
869 		   ctx.cmd.rx_filter_del.filter_id);
870 
871 	return 0;
872 }
873 
874 static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
875 {
876 	struct ionic *ionic = lif->ionic;
877 	struct ionic_deferred_work *work;
878 	unsigned int nmfilters;
879 	unsigned int nufilters;
880 
881 	if (add) {
882 		/* Do we have space for this filter?  We test the counters
883 		 * here before checking the need for deferral so that we
884 		 * can return an overflow error to the stack.
885 		 */
886 		nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
887 		nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);
888 
889 		if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
890 			lif->nmcast++;
891 		else if (!is_multicast_ether_addr(addr) &&
892 			 lif->nucast < nufilters)
893 			lif->nucast++;
894 		else
895 			return -ENOSPC;
896 	} else {
897 		if (is_multicast_ether_addr(addr) && lif->nmcast)
898 			lif->nmcast--;
899 		else if (!is_multicast_ether_addr(addr) && lif->nucast)
900 			lif->nucast--;
901 	}
902 
903 	if (in_interrupt()) {
904 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
905 		if (!work) {
906 			netdev_err(lif->netdev, "%s OOM\n", __func__);
907 			return -ENOMEM;
908 		}
909 		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
910 				   IONIC_DW_TYPE_RX_ADDR_DEL;
911 		memcpy(work->addr, addr, ETH_ALEN);
912 		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
913 			   add ? "add" : "del", addr);
914 		ionic_lif_deferred_enqueue(&lif->deferred, work);
915 	} else {
916 		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
917 			   add ? "add" : "del", addr);
918 		if (add)
919 			return ionic_lif_addr_add(lif, addr);
920 		else
921 			return ionic_lif_addr_del(lif, addr);
922 	}
923 
924 	return 0;
925 }
926 
927 static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
928 {
929 	return ionic_lif_addr(netdev_priv(netdev), addr, true);
930 }
931 
932 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
933 {
934 	return ionic_lif_addr(netdev_priv(netdev), addr, false);
935 }
936 
937 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
938 {
939 	struct ionic_admin_ctx ctx = {
940 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
941 		.cmd.rx_mode_set = {
942 			.opcode = IONIC_CMD_RX_MODE_SET,
943 			.lif_index = cpu_to_le16(lif->index),
944 			.rx_mode = cpu_to_le16(rx_mode),
945 		},
946 	};
947 	char buf[128];
948 	int err;
949 	int i;
950 #define REMAIN(__x) (sizeof(buf) - (__x))
951 
952 	i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
953 		     lif->rx_mode, rx_mode);
954 	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
955 		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
956 	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
957 		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
958 	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
959 		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
960 	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
961 		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
962 	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
963 		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
964 	netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
965 
966 	err = ionic_adminq_post_wait(lif, &ctx);
967 	if (err)
968 		netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
969 			    rx_mode, err);
970 	else
971 		lif->rx_mode = rx_mode;
972 }
973 
974 static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
975 {
976 	struct ionic_deferred_work *work;
977 
978 	if (in_interrupt()) {
979 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
980 		if (!work) {
981 			netdev_err(lif->netdev, "%s OOM\n", __func__);
982 			return;
983 		}
984 		work->type = IONIC_DW_TYPE_RX_MODE;
985 		work->rx_mode = rx_mode;
986 		netdev_dbg(lif->netdev, "deferred: rx_mode\n");
987 		ionic_lif_deferred_enqueue(&lif->deferred, work);
988 	} else {
989 		ionic_lif_rx_mode(lif, rx_mode);
990 	}
991 }
992 
993 static void ionic_set_rx_mode(struct net_device *netdev)
994 {
995 	struct ionic_lif *lif = netdev_priv(netdev);
996 	struct ionic_identity *ident;
997 	unsigned int nfilters;
998 	unsigned int rx_mode;
999 
1000 	ident = &lif->ionic->ident;
1001 
1002 	rx_mode = IONIC_RX_MODE_F_UNICAST;
1003 	rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
1004 	rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
1005 	rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
1006 	rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
1007 
1008 	/* sync unicast addresses
1009 	 * next check to see if we're in an overflow state
1010 	 *    if so, we track that we overflowed and enable NIC PROMISC
1011 	 *    else if the overflow is set and not needed
1012 	 *       we remove our overflow flag and check the netdev flags
1013 	 *       to see if we can disable NIC PROMISC
1014 	 */
1015 	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
1016 	nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
1017 	if (netdev_uc_count(netdev) + 1 > nfilters) {
1018 		rx_mode |= IONIC_RX_MODE_F_PROMISC;
1019 		lif->uc_overflow = true;
1020 	} else if (lif->uc_overflow) {
1021 		lif->uc_overflow = false;
1022 		if (!(netdev->flags & IFF_PROMISC))
1023 			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
1024 	}
1025 
1026 	/* same for multicast */
1027 	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
1028 	nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
1029 	if (netdev_mc_count(netdev) > nfilters) {
1030 		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
1031 		lif->mc_overflow = true;
1032 	} else if (lif->mc_overflow) {
1033 		lif->mc_overflow = false;
1034 		if (!(netdev->flags & IFF_ALLMULTI))
1035 			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
1036 	}
1037 
1038 	if (lif->rx_mode != rx_mode)
1039 		_ionic_lif_rx_mode(lif, rx_mode);
1040 }
1041 
1042 static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1043 {
1044 	u64 wanted = 0;
1045 
1046 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
1047 		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1048 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1049 		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1050 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1051 		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1052 	if (features & NETIF_F_RXHASH)
1053 		wanted |= IONIC_ETH_HW_RX_HASH;
1054 	if (features & NETIF_F_RXCSUM)
1055 		wanted |= IONIC_ETH_HW_RX_CSUM;
1056 	if (features & NETIF_F_SG)
1057 		wanted |= IONIC_ETH_HW_TX_SG;
1058 	if (features & NETIF_F_HW_CSUM)
1059 		wanted |= IONIC_ETH_HW_TX_CSUM;
1060 	if (features & NETIF_F_TSO)
1061 		wanted |= IONIC_ETH_HW_TSO;
1062 	if (features & NETIF_F_TSO6)
1063 		wanted |= IONIC_ETH_HW_TSO_IPV6;
1064 	if (features & NETIF_F_TSO_ECN)
1065 		wanted |= IONIC_ETH_HW_TSO_ECN;
1066 	if (features & NETIF_F_GSO_GRE)
1067 		wanted |= IONIC_ETH_HW_TSO_GRE;
1068 	if (features & NETIF_F_GSO_GRE_CSUM)
1069 		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1070 	if (features & NETIF_F_GSO_IPXIP4)
1071 		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1072 	if (features & NETIF_F_GSO_IPXIP6)
1073 		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1074 	if (features & NETIF_F_GSO_UDP_TUNNEL)
1075 		wanted |= IONIC_ETH_HW_TSO_UDP;
1076 	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1077 		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1078 
1079 	return cpu_to_le64(wanted);
1080 }
1081 
1082 static int ionic_set_nic_features(struct ionic_lif *lif,
1083 				  netdev_features_t features)
1084 {
1085 	struct device *dev = lif->ionic->dev;
1086 	struct ionic_admin_ctx ctx = {
1087 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1088 		.cmd.lif_setattr = {
1089 			.opcode = IONIC_CMD_LIF_SETATTR,
1090 			.index = cpu_to_le16(lif->index),
1091 			.attr = IONIC_LIF_ATTR_FEATURES,
1092 		},
1093 	};
1094 	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
1095 			 IONIC_ETH_HW_VLAN_RX_STRIP |
1096 			 IONIC_ETH_HW_VLAN_RX_FILTER;
1097 	u64 old_hw_features;
1098 	int err;
1099 
1100 	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
1101 	err = ionic_adminq_post_wait(lif, &ctx);
1102 	if (err)
1103 		return err;
1104 
1105 	old_hw_features = lif->hw_features;
1106 	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
1107 				       ctx.comp.lif_setattr.features);
1108 
1109 	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
1110 		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1111 
1112 	if ((vlan_flags & features) &&
1113 	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
1114 		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
1115 
1116 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1117 		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
1118 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1119 		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
1120 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1121 		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
1122 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1123 		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
1124 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1125 		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
1126 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1127 		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
1128 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1129 		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
1130 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1131 		dev_dbg(dev, "feature ETH_HW_TSO\n");
1132 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1133 		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
1134 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1135 		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
1136 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1137 		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
1138 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1139 		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
1140 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1141 		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
1142 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1143 		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
1144 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1145 		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
1146 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1147 		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
1148 
1149 	return 0;
1150 }
1151 
1152 static int ionic_init_nic_features(struct ionic_lif *lif)
1153 {
1154 	struct net_device *netdev = lif->netdev;
1155 	netdev_features_t features;
1156 	int err;
1157 
1158 	/* no netdev features on the management device */
1159 	if (lif->ionic->is_mgmt_nic)
1160 		return 0;
1161 
1162 	/* set up what we expect to support by default */
1163 	features = NETIF_F_HW_VLAN_CTAG_TX |
1164 		   NETIF_F_HW_VLAN_CTAG_RX |
1165 		   NETIF_F_HW_VLAN_CTAG_FILTER |
1166 		   NETIF_F_RXHASH |
1167 		   NETIF_F_SG |
1168 		   NETIF_F_HW_CSUM |
1169 		   NETIF_F_RXCSUM |
1170 		   NETIF_F_TSO |
1171 		   NETIF_F_TSO6 |
1172 		   NETIF_F_TSO_ECN;
1173 
1174 	err = ionic_set_nic_features(lif, features);
1175 	if (err)
1176 		return err;
1177 
1178 	/* tell the netdev what we actually can support */
1179 	netdev->features |= NETIF_F_HIGHDMA;
1180 
1181 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1182 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
1183 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1184 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1185 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1186 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1187 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1188 		netdev->hw_features |= NETIF_F_RXHASH;
1189 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1190 		netdev->hw_features |= NETIF_F_SG;
1191 
1192 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1193 		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
1194 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1195 		netdev->hw_enc_features |= NETIF_F_RXCSUM;
1196 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1197 		netdev->hw_enc_features |= NETIF_F_TSO;
1198 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1199 		netdev->hw_enc_features |= NETIF_F_TSO6;
1200 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1201 		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
1202 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1203 		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
1204 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1205 		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
1206 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1207 		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
1208 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1209 		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
1210 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1211 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1212 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1213 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1214 
1215 	netdev->hw_features |= netdev->hw_enc_features;
1216 	netdev->features |= netdev->hw_features;
1217 
1218 	netdev->priv_flags |= IFF_UNICAST_FLT;
1219 
1220 	return 0;
1221 }
1222 
1223 static int ionic_set_features(struct net_device *netdev,
1224 			      netdev_features_t features)
1225 {
1226 	struct ionic_lif *lif = netdev_priv(netdev);
1227 	int err;
1228 
1229 	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1230 		   __func__, (u64)lif->netdev->features, (u64)features);
1231 
1232 	err = ionic_set_nic_features(lif, features);
1233 
1234 	return err;
1235 }
1236 
1237 static int ionic_set_mac_address(struct net_device *netdev, void *sa)
1238 {
1239 	struct sockaddr *addr = sa;
1240 	u8 *mac;
1241 	int err;
1242 
1243 	mac = (u8 *)addr->sa_data;
1244 	if (ether_addr_equal(netdev->dev_addr, mac))
1245 		return 0;
1246 
1247 	err = eth_prepare_mac_addr_change(netdev, addr);
1248 	if (err)
1249 		return err;
1250 
1251 	if (!is_zero_ether_addr(netdev->dev_addr)) {
1252 		netdev_info(netdev, "deleting mac addr %pM\n",
1253 			    netdev->dev_addr);
1254 		ionic_addr_del(netdev, netdev->dev_addr);
1255 	}
1256 
1257 	eth_commit_mac_addr_change(netdev, addr);
1258 	netdev_info(netdev, "updating mac addr %pM\n", mac);
1259 
1260 	return ionic_addr_add(netdev, mac);
1261 }
1262 
1263 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1264 {
1265 	struct ionic_lif *lif = netdev_priv(netdev);
1266 	struct ionic_admin_ctx ctx = {
1267 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1268 		.cmd.lif_setattr = {
1269 			.opcode = IONIC_CMD_LIF_SETATTR,
1270 			.index = cpu_to_le16(lif->index),
1271 			.attr = IONIC_LIF_ATTR_MTU,
1272 			.mtu = cpu_to_le32(new_mtu),
1273 		},
1274 	};
1275 	int err;
1276 
1277 	err = ionic_adminq_post_wait(lif, &ctx);
1278 	if (err)
1279 		return err;
1280 
1281 	netdev->mtu = new_mtu;
1282 	err = ionic_reset_queues(lif);
1283 
1284 	return err;
1285 }
1286 
1287 static void ionic_tx_timeout_work(struct work_struct *ws)
1288 {
1289 	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1290 
1291 	netdev_info(lif->netdev, "Tx Timeout recovery\n");
1292 
1293 	rtnl_lock();
1294 	ionic_reset_queues(lif);
1295 	rtnl_unlock();
1296 }
1297 
1298 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1299 {
1300 	struct ionic_lif *lif = netdev_priv(netdev);
1301 
1302 	schedule_work(&lif->tx_timeout_work);
1303 }
1304 
1305 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1306 				 u16 vid)
1307 {
1308 	struct ionic_lif *lif = netdev_priv(netdev);
1309 	struct ionic_admin_ctx ctx = {
1310 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1311 		.cmd.rx_filter_add = {
1312 			.opcode = IONIC_CMD_RX_FILTER_ADD,
1313 			.lif_index = cpu_to_le16(lif->index),
1314 			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
1315 			.vlan.vlan = cpu_to_le16(vid),
1316 		},
1317 	};
1318 	int err;
1319 
1320 	err = ionic_adminq_post_wait(lif, &ctx);
1321 	if (err)
1322 		return err;
1323 
1324 	netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
1325 		   ctx.comp.rx_filter_add.filter_id);
1326 
1327 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
1328 }
1329 
1330 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1331 				  u16 vid)
1332 {
1333 	struct ionic_lif *lif = netdev_priv(netdev);
1334 	struct ionic_admin_ctx ctx = {
1335 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1336 		.cmd.rx_filter_del = {
1337 			.opcode = IONIC_CMD_RX_FILTER_DEL,
1338 			.lif_index = cpu_to_le16(lif->index),
1339 		},
1340 	};
1341 	struct ionic_rx_filter *f;
1342 
1343 	spin_lock_bh(&lif->rx_filters.lock);
1344 
1345 	f = ionic_rx_filter_by_vlan(lif, vid);
1346 	if (!f) {
1347 		spin_unlock_bh(&lif->rx_filters.lock);
1348 		return -ENOENT;
1349 	}
1350 
1351 	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
1352 		   le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));
1353 
1354 	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
1355 	ionic_rx_filter_free(lif, f);
1356 	spin_unlock_bh(&lif->rx_filters.lock);
1357 
1358 	return ionic_adminq_post_wait(lif, &ctx);
1359 }
1360 
1361 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1362 			 const u8 *key, const u32 *indir)
1363 {
1364 	struct ionic_admin_ctx ctx = {
1365 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1366 		.cmd.lif_setattr = {
1367 			.opcode = IONIC_CMD_LIF_SETATTR,
1368 			.attr = IONIC_LIF_ATTR_RSS,
1369 			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1370 		},
1371 	};
1372 	unsigned int i, tbl_sz;
1373 
1374 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
1375 		lif->rss_types = types;
1376 		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
1377 	}
1378 
1379 	if (key)
1380 		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1381 
1382 	if (indir) {
1383 		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1384 		for (i = 0; i < tbl_sz; i++)
1385 			lif->rss_ind_tbl[i] = indir[i];
1386 	}
1387 
1388 	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1389 	       IONIC_RSS_HASH_KEY_SIZE);
1390 
1391 	return ionic_adminq_post_wait(lif, &ctx);
1392 }
1393 
1394 static int ionic_lif_rss_init(struct ionic_lif *lif)
1395 {
1396 	unsigned int tbl_sz;
1397 	unsigned int i;
1398 
1399 	lif->rss_types = IONIC_RSS_TYPE_IPV4     |
1400 			 IONIC_RSS_TYPE_IPV4_TCP |
1401 			 IONIC_RSS_TYPE_IPV4_UDP |
1402 			 IONIC_RSS_TYPE_IPV6     |
1403 			 IONIC_RSS_TYPE_IPV6_TCP |
1404 			 IONIC_RSS_TYPE_IPV6_UDP;
1405 
1406 	/* Fill indirection table with 'default' values */
1407 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1408 	for (i = 0; i < tbl_sz; i++)
1409 		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1410 
1411 	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1412 }
1413 
1414 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1415 {
1416 	int tbl_sz;
1417 
1418 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1419 	memset(lif->rss_ind_tbl, 0, tbl_sz);
1420 	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1421 
1422 	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1423 }
1424 
1425 static void ionic_txrx_disable(struct ionic_lif *lif)
1426 {
1427 	unsigned int i;
1428 
1429 	for (i = 0; i < lif->nxqs; i++) {
1430 		ionic_qcq_disable(lif->txqcqs[i].qcq);
1431 		ionic_qcq_disable(lif->rxqcqs[i].qcq);
1432 	}
1433 }
1434 
1435 static void ionic_txrx_deinit(struct ionic_lif *lif)
1436 {
1437 	unsigned int i;
1438 
1439 	for (i = 0; i < lif->nxqs; i++) {
1440 		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1441 		ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
1442 
1443 		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1444 		ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
1445 		ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
1446 	}
1447 }
1448 
1449 static void ionic_txrx_free(struct ionic_lif *lif)
1450 {
1451 	unsigned int i;
1452 
1453 	for (i = 0; i < lif->nxqs; i++) {
1454 		ionic_qcq_free(lif, lif->txqcqs[i].qcq);
1455 		lif->txqcqs[i].qcq = NULL;
1456 
1457 		ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
1458 		lif->rxqcqs[i].qcq = NULL;
1459 	}
1460 }
1461 
1462 static int ionic_txrx_alloc(struct ionic_lif *lif)
1463 {
1464 	unsigned int flags;
1465 	unsigned int i;
1466 	int err = 0;
1467 
1468 	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
1469 	for (i = 0; i < lif->nxqs; i++) {
1470 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
1471 				      lif->ntxq_descs,
1472 				      sizeof(struct ionic_txq_desc),
1473 				      sizeof(struct ionic_txq_comp),
1474 				      sizeof(struct ionic_txq_sg_desc),
1475 				      lif->kern_pid, &lif->txqcqs[i].qcq);
1476 		if (err)
1477 			goto err_out;
1478 
1479 		lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
1480 	}
1481 
1482 	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
1483 	for (i = 0; i < lif->nxqs; i++) {
1484 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
1485 				      lif->nrxq_descs,
1486 				      sizeof(struct ionic_rxq_desc),
1487 				      sizeof(struct ionic_rxq_comp),
1488 				      sizeof(struct ionic_rxq_sg_desc),
1489 				      lif->kern_pid, &lif->rxqcqs[i].qcq);
1490 		if (err)
1491 			goto err_out;
1492 
1493 		lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
1494 
1495 		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
1496 				     lif->rxqcqs[i].qcq->intr.index,
1497 				     lif->rx_coalesce_hw);
1498 		ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
1499 					  lif->txqcqs[i].qcq);
1500 	}
1501 
1502 	return 0;
1503 
1504 err_out:
1505 	ionic_txrx_free(lif);
1506 
1507 	return err;
1508 }
1509 
1510 static int ionic_txrx_init(struct ionic_lif *lif)
1511 {
1512 	unsigned int i;
1513 	int err;
1514 
1515 	for (i = 0; i < lif->nxqs; i++) {
1516 		err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
1517 		if (err)
1518 			goto err_out;
1519 
1520 		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
1521 		if (err) {
1522 			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1523 			goto err_out;
1524 		}
1525 	}
1526 
1527 	if (lif->netdev->features & NETIF_F_RXHASH)
1528 		ionic_lif_rss_init(lif);
1529 
1530 	ionic_set_rx_mode(lif->netdev);
1531 
1532 	return 0;
1533 
1534 err_out:
1535 	while (i--) {
1536 		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1537 		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1538 	}
1539 
1540 	return err;
1541 }
1542 
1543 static int ionic_txrx_enable(struct ionic_lif *lif)
1544 {
1545 	int i, err;
1546 
1547 	for (i = 0; i < lif->nxqs; i++) {
1548 		err = ionic_qcq_enable(lif->txqcqs[i].qcq);
1549 		if (err)
1550 			goto err_out;
1551 
1552 		ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
1553 		err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
1554 		if (err) {
1555 			ionic_qcq_disable(lif->txqcqs[i].qcq);
1556 			goto err_out;
1557 		}
1558 	}
1559 
1560 	return 0;
1561 
1562 err_out:
1563 	while (i--) {
1564 		ionic_qcq_disable(lif->rxqcqs[i].qcq);
1565 		ionic_qcq_disable(lif->txqcqs[i].qcq);
1566 	}
1567 
1568 	return err;
1569 }
1570 
1571 int ionic_open(struct net_device *netdev)
1572 {
1573 	struct ionic_lif *lif = netdev_priv(netdev);
1574 	int err;
1575 
1576 	netif_carrier_off(netdev);
1577 
1578 	err = ionic_txrx_alloc(lif);
1579 	if (err)
1580 		return err;
1581 
1582 	err = ionic_txrx_init(lif);
1583 	if (err)
1584 		goto err_txrx_free;
1585 
1586 	err = ionic_txrx_enable(lif);
1587 	if (err)
1588 		goto err_txrx_deinit;
1589 
1590 	netif_set_real_num_tx_queues(netdev, lif->nxqs);
1591 	netif_set_real_num_rx_queues(netdev, lif->nxqs);
1592 
1593 	set_bit(IONIC_LIF_F_UP, lif->state);
1594 
1595 	ionic_link_status_check_request(lif);
1596 	if (netif_carrier_ok(netdev))
1597 		netif_tx_wake_all_queues(netdev);
1598 
1599 	return 0;
1600 
1601 err_txrx_deinit:
1602 	ionic_txrx_deinit(lif);
1603 err_txrx_free:
1604 	ionic_txrx_free(lif);
1605 	return err;
1606 }
1607 
1608 int ionic_stop(struct net_device *netdev)
1609 {
1610 	struct ionic_lif *lif = netdev_priv(netdev);
1611 	int err = 0;
1612 
1613 	if (!test_bit(IONIC_LIF_F_UP, lif->state)) {
1614 		dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n",
1615 			__func__, lif->name);
1616 		return 0;
1617 	}
1618 	dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name);
1619 	clear_bit(IONIC_LIF_F_UP, lif->state);
1620 
1621 	/* carrier off before disabling queues to avoid watchdog timeout */
1622 	netif_carrier_off(netdev);
1623 	netif_tx_stop_all_queues(netdev);
1624 	netif_tx_disable(netdev);
1625 
1626 	ionic_txrx_disable(lif);
1627 	ionic_lif_quiesce(lif);
1628 	ionic_txrx_deinit(lif);
1629 	ionic_txrx_free(lif);
1630 
1631 	return err;
1632 }
1633 
1634 static int ionic_get_vf_config(struct net_device *netdev,
1635 			       int vf, struct ifla_vf_info *ivf)
1636 {
1637 	struct ionic_lif *lif = netdev_priv(netdev);
1638 	struct ionic *ionic = lif->ionic;
1639 	int ret = 0;
1640 
1641 	down_read(&ionic->vf_op_lock);
1642 
1643 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1644 		ret = -EINVAL;
1645 	} else {
1646 		ivf->vf           = vf;
1647 		ivf->vlan         = ionic->vfs[vf].vlanid;
1648 		ivf->qos	  = 0;
1649 		ivf->spoofchk     = ionic->vfs[vf].spoofchk;
1650 		ivf->linkstate    = ionic->vfs[vf].linkstate;
1651 		ivf->max_tx_rate  = ionic->vfs[vf].maxrate;
1652 		ivf->trusted      = ionic->vfs[vf].trusted;
1653 		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
1654 	}
1655 
1656 	up_read(&ionic->vf_op_lock);
1657 	return ret;
1658 }
1659 
1660 static int ionic_get_vf_stats(struct net_device *netdev, int vf,
1661 			      struct ifla_vf_stats *vf_stats)
1662 {
1663 	struct ionic_lif *lif = netdev_priv(netdev);
1664 	struct ionic *ionic = lif->ionic;
1665 	struct ionic_lif_stats *vs;
1666 	int ret = 0;
1667 
1668 	down_read(&ionic->vf_op_lock);
1669 
1670 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1671 		ret = -EINVAL;
1672 	} else {
1673 		memset(vf_stats, 0, sizeof(*vf_stats));
1674 		vs = &ionic->vfs[vf].stats;
1675 
1676 		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
1677 		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
1678 		vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
1679 		vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
1680 		vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
1681 		vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
1682 		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
1683 				       le64_to_cpu(vs->rx_mcast_drop_packets) +
1684 				       le64_to_cpu(vs->rx_bcast_drop_packets);
1685 		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
1686 				       le64_to_cpu(vs->tx_mcast_drop_packets) +
1687 				       le64_to_cpu(vs->tx_bcast_drop_packets);
1688 	}
1689 
1690 	up_read(&ionic->vf_op_lock);
1691 	return ret;
1692 }
1693 
1694 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1695 {
1696 	struct ionic_lif *lif = netdev_priv(netdev);
1697 	struct ionic *ionic = lif->ionic;
1698 	int ret;
1699 
1700 	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
1701 		return -EINVAL;
1702 
1703 	down_read(&ionic->vf_op_lock);
1704 
1705 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1706 		ret = -EINVAL;
1707 	} else {
1708 		ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
1709 		if (!ret)
1710 			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
1711 	}
1712 
1713 	up_read(&ionic->vf_op_lock);
1714 	return ret;
1715 }
1716 
1717 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1718 			     u8 qos, __be16 proto)
1719 {
1720 	struct ionic_lif *lif = netdev_priv(netdev);
1721 	struct ionic *ionic = lif->ionic;
1722 	int ret;
1723 
1724 	/* until someday when we support qos */
1725 	if (qos)
1726 		return -EINVAL;
1727 
1728 	if (vlan > 4095)
1729 		return -EINVAL;
1730 
1731 	if (proto != htons(ETH_P_8021Q))
1732 		return -EPROTONOSUPPORT;
1733 
1734 	down_read(&ionic->vf_op_lock);
1735 
1736 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1737 		ret = -EINVAL;
1738 	} else {
1739 		ret = ionic_set_vf_config(ionic, vf,
1740 					  IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
1741 		if (!ret)
1742 			ionic->vfs[vf].vlanid = vlan;
1743 	}
1744 
1745 	up_read(&ionic->vf_op_lock);
1746 	return ret;
1747 }
1748 
1749 static int ionic_set_vf_rate(struct net_device *netdev, int vf,
1750 			     int tx_min, int tx_max)
1751 {
1752 	struct ionic_lif *lif = netdev_priv(netdev);
1753 	struct ionic *ionic = lif->ionic;
1754 	int ret;
1755 
1756 	/* setting the min just seems silly */
1757 	if (tx_min)
1758 		return -EINVAL;
1759 
1760 	down_write(&ionic->vf_op_lock);
1761 
1762 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1763 		ret = -EINVAL;
1764 	} else {
1765 		ret = ionic_set_vf_config(ionic, vf,
1766 					  IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
1767 		if (!ret)
1768 			lif->ionic->vfs[vf].maxrate = tx_max;
1769 	}
1770 
1771 	up_write(&ionic->vf_op_lock);
1772 	return ret;
1773 }
1774 
1775 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
1776 {
1777 	struct ionic_lif *lif = netdev_priv(netdev);
1778 	struct ionic *ionic = lif->ionic;
1779 	u8 data = set;  /* convert to u8 for config */
1780 	int ret;
1781 
1782 	down_write(&ionic->vf_op_lock);
1783 
1784 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1785 		ret = -EINVAL;
1786 	} else {
1787 		ret = ionic_set_vf_config(ionic, vf,
1788 					  IONIC_VF_ATTR_SPOOFCHK, &data);
1789 		if (!ret)
1790 			ionic->vfs[vf].spoofchk = data;
1791 	}
1792 
1793 	up_write(&ionic->vf_op_lock);
1794 	return ret;
1795 }
1796 
1797 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
1798 {
1799 	struct ionic_lif *lif = netdev_priv(netdev);
1800 	struct ionic *ionic = lif->ionic;
1801 	u8 data = set;  /* convert to u8 for config */
1802 	int ret;
1803 
1804 	down_write(&ionic->vf_op_lock);
1805 
1806 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1807 		ret = -EINVAL;
1808 	} else {
1809 		ret = ionic_set_vf_config(ionic, vf,
1810 					  IONIC_VF_ATTR_TRUST, &data);
1811 		if (!ret)
1812 			ionic->vfs[vf].trusted = data;
1813 	}
1814 
1815 	up_write(&ionic->vf_op_lock);
1816 	return ret;
1817 }
1818 
1819 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
1820 {
1821 	struct ionic_lif *lif = netdev_priv(netdev);
1822 	struct ionic *ionic = lif->ionic;
1823 	u8 data;
1824 	int ret;
1825 
1826 	switch (set) {
1827 	case IFLA_VF_LINK_STATE_ENABLE:
1828 		data = IONIC_VF_LINK_STATUS_UP;
1829 		break;
1830 	case IFLA_VF_LINK_STATE_DISABLE:
1831 		data = IONIC_VF_LINK_STATUS_DOWN;
1832 		break;
1833 	case IFLA_VF_LINK_STATE_AUTO:
1834 		data = IONIC_VF_LINK_STATUS_AUTO;
1835 		break;
1836 	default:
1837 		return -EINVAL;
1838 	}
1839 
1840 	down_write(&ionic->vf_op_lock);
1841 
1842 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1843 		ret = -EINVAL;
1844 	} else {
1845 		ret = ionic_set_vf_config(ionic, vf,
1846 					  IONIC_VF_ATTR_LINKSTATE, &data);
1847 		if (!ret)
1848 			ionic->vfs[vf].linkstate = set;
1849 	}
1850 
1851 	up_write(&ionic->vf_op_lock);
1852 	return ret;
1853 }
1854 
1855 static const struct net_device_ops ionic_netdev_ops = {
1856 	.ndo_open               = ionic_open,
1857 	.ndo_stop               = ionic_stop,
1858 	.ndo_start_xmit		= ionic_start_xmit,
1859 	.ndo_get_stats64	= ionic_get_stats64,
1860 	.ndo_set_rx_mode	= ionic_set_rx_mode,
1861 	.ndo_set_features	= ionic_set_features,
1862 	.ndo_set_mac_address	= ionic_set_mac_address,
1863 	.ndo_validate_addr	= eth_validate_addr,
1864 	.ndo_tx_timeout         = ionic_tx_timeout,
1865 	.ndo_change_mtu         = ionic_change_mtu,
1866 	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
1867 	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
1868 	.ndo_set_vf_vlan	= ionic_set_vf_vlan,
1869 	.ndo_set_vf_trust	= ionic_set_vf_trust,
1870 	.ndo_set_vf_mac		= ionic_set_vf_mac,
1871 	.ndo_set_vf_rate	= ionic_set_vf_rate,
1872 	.ndo_set_vf_spoofchk	= ionic_set_vf_spoofchk,
1873 	.ndo_get_vf_config	= ionic_get_vf_config,
1874 	.ndo_set_vf_link_state	= ionic_set_vf_link_state,
1875 	.ndo_get_vf_stats       = ionic_get_vf_stats,
1876 };
1877 
1878 int ionic_reset_queues(struct ionic_lif *lif)
1879 {
1880 	bool running;
1881 	int err = 0;
1882 
1883 	/* Put off the next watchdog timeout */
1884 	netif_trans_update(lif->netdev);
1885 
1886 	err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
1887 	if (err)
1888 		return err;
1889 
1890 	running = netif_running(lif->netdev);
1891 	if (running)
1892 		err = ionic_stop(lif->netdev);
1893 	if (!err && running)
1894 		ionic_open(lif->netdev);
1895 
1896 	clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
1897 
1898 	return err;
1899 }
1900 
1901 static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
1902 {
1903 	struct device *dev = ionic->dev;
1904 	struct net_device *netdev;
1905 	struct ionic_lif *lif;
1906 	int tbl_sz;
1907 	int err;
1908 
1909 	netdev = alloc_etherdev_mqs(sizeof(*lif),
1910 				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
1911 	if (!netdev) {
1912 		dev_err(dev, "Cannot allocate netdev, aborting\n");
1913 		return ERR_PTR(-ENOMEM);
1914 	}
1915 
1916 	SET_NETDEV_DEV(netdev, dev);
1917 
1918 	lif = netdev_priv(netdev);
1919 	lif->netdev = netdev;
1920 	ionic->master_lif = lif;
1921 	netdev->netdev_ops = &ionic_netdev_ops;
1922 	ionic_ethtool_set_ops(netdev);
1923 
1924 	netdev->watchdog_timeo = 2 * HZ;
1925 	netdev->min_mtu = IONIC_MIN_MTU;
1926 	netdev->max_mtu = IONIC_MAX_MTU;
1927 
1928 	lif->neqs = ionic->neqs_per_lif;
1929 	lif->nxqs = ionic->ntxqs_per_lif;
1930 
1931 	lif->ionic = ionic;
1932 	lif->index = index;
1933 	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
1934 	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
1935 
1936 	/* Convert the default coalesce value to actual hw resolution */
1937 	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
1938 	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
1939 						    lif->rx_coalesce_usecs);
1940 
1941 	snprintf(lif->name, sizeof(lif->name), "lif%u", index);
1942 
1943 	spin_lock_init(&lif->adminq_lock);
1944 
1945 	spin_lock_init(&lif->deferred.lock);
1946 	INIT_LIST_HEAD(&lif->deferred.list);
1947 	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
1948 
1949 	/* allocate lif info */
1950 	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
1951 	lif->info = dma_alloc_coherent(dev, lif->info_sz,
1952 				       &lif->info_pa, GFP_KERNEL);
1953 	if (!lif->info) {
1954 		dev_err(dev, "Failed to allocate lif info, aborting\n");
1955 		err = -ENOMEM;
1956 		goto err_out_free_netdev;
1957 	}
1958 
1959 	/* allocate queues */
1960 	err = ionic_qcqs_alloc(lif);
1961 	if (err)
1962 		goto err_out_free_lif_info;
1963 
1964 	/* allocate rss indirection table */
1965 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1966 	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
1967 	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
1968 					      &lif->rss_ind_tbl_pa,
1969 					      GFP_KERNEL);
1970 
1971 	if (!lif->rss_ind_tbl) {
1972 		err = -ENOMEM;
1973 		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
1974 		goto err_out_free_qcqs;
1975 	}
1976 	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
1977 
1978 	list_add_tail(&lif->list, &ionic->lifs);
1979 
1980 	return lif;
1981 
1982 err_out_free_qcqs:
1983 	ionic_qcqs_free(lif);
1984 err_out_free_lif_info:
1985 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
1986 	lif->info = NULL;
1987 	lif->info_pa = 0;
1988 err_out_free_netdev:
1989 	free_netdev(lif->netdev);
1990 	lif = NULL;
1991 
1992 	return ERR_PTR(err);
1993 }
1994 
1995 int ionic_lifs_alloc(struct ionic *ionic)
1996 {
1997 	struct ionic_lif *lif;
1998 
1999 	INIT_LIST_HEAD(&ionic->lifs);
2000 
2001 	/* only build the first lif, others are for later features */
2002 	set_bit(0, ionic->lifbits);
2003 	lif = ionic_lif_alloc(ionic, 0);
2004 
2005 	return PTR_ERR_OR_ZERO(lif);
2006 }
2007 
2008 static void ionic_lif_reset(struct ionic_lif *lif)
2009 {
2010 	struct ionic_dev *idev = &lif->ionic->idev;
2011 
2012 	mutex_lock(&lif->ionic->dev_cmd_lock);
2013 	ionic_dev_cmd_lif_reset(idev, lif->index);
2014 	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2015 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2016 }
2017 
2018 static void ionic_lif_free(struct ionic_lif *lif)
2019 {
2020 	struct device *dev = lif->ionic->dev;
2021 
2022 	/* free rss indirection table */
2023 	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
2024 			  lif->rss_ind_tbl_pa);
2025 	lif->rss_ind_tbl = NULL;
2026 	lif->rss_ind_tbl_pa = 0;
2027 
2028 	/* free queues */
2029 	ionic_qcqs_free(lif);
2030 	ionic_lif_reset(lif);
2031 
2032 	/* free lif info */
2033 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2034 	lif->info = NULL;
2035 	lif->info_pa = 0;
2036 
2037 	/* unmap doorbell page */
2038 	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2039 	lif->kern_dbpage = NULL;
2040 	kfree(lif->dbid_inuse);
2041 	lif->dbid_inuse = NULL;
2042 
2043 	/* free netdev & lif */
2044 	ionic_debugfs_del_lif(lif);
2045 	list_del(&lif->list);
2046 	free_netdev(lif->netdev);
2047 }
2048 
2049 void ionic_lifs_free(struct ionic *ionic)
2050 {
2051 	struct list_head *cur, *tmp;
2052 	struct ionic_lif *lif;
2053 
2054 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2055 		lif = list_entry(cur, struct ionic_lif, list);
2056 
2057 		ionic_lif_free(lif);
2058 	}
2059 }
2060 
2061 static void ionic_lif_deinit(struct ionic_lif *lif)
2062 {
2063 	if (!test_bit(IONIC_LIF_F_INITED, lif->state))
2064 		return;
2065 
2066 	clear_bit(IONIC_LIF_F_INITED, lif->state);
2067 
2068 	ionic_rx_filters_deinit(lif);
2069 	ionic_lif_rss_deinit(lif);
2070 
2071 	napi_disable(&lif->adminqcq->napi);
2072 	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2073 	ionic_lif_qcq_deinit(lif, lif->adminqcq);
2074 
2075 	ionic_lif_reset(lif);
2076 }
2077 
2078 void ionic_lifs_deinit(struct ionic *ionic)
2079 {
2080 	struct list_head *cur, *tmp;
2081 	struct ionic_lif *lif;
2082 
2083 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2084 		lif = list_entry(cur, struct ionic_lif, list);
2085 		ionic_lif_deinit(lif);
2086 	}
2087 }
2088 
2089 static int ionic_lif_adminq_init(struct ionic_lif *lif)
2090 {
2091 	struct device *dev = lif->ionic->dev;
2092 	struct ionic_q_init_comp comp;
2093 	struct ionic_dev *idev;
2094 	struct ionic_qcq *qcq;
2095 	struct ionic_queue *q;
2096 	int err;
2097 
2098 	idev = &lif->ionic->idev;
2099 	qcq = lif->adminqcq;
2100 	q = &qcq->q;
2101 
2102 	mutex_lock(&lif->ionic->dev_cmd_lock);
2103 	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
2104 	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2105 	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2106 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2107 	if (err) {
2108 		netdev_err(lif->netdev, "adminq init failed %d\n", err);
2109 		return err;
2110 	}
2111 
2112 	q->hw_type = comp.hw_type;
2113 	q->hw_index = le32_to_cpu(comp.hw_index);
2114 	q->dbval = IONIC_DBELL_QID(q->hw_index);
2115 
2116 	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
2117 	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
2118 
2119 	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
2120 		       NAPI_POLL_WEIGHT);
2121 
2122 	err = ionic_request_irq(lif, qcq);
2123 	if (err) {
2124 		netdev_warn(lif->netdev, "adminq irq request failed %d\n", err);
2125 		netif_napi_del(&qcq->napi);
2126 		return err;
2127 	}
2128 
2129 	napi_enable(&qcq->napi);
2130 
2131 	if (qcq->flags & IONIC_QCQ_F_INTR)
2132 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
2133 				IONIC_INTR_MASK_CLEAR);
2134 
2135 	qcq->flags |= IONIC_QCQ_F_INITED;
2136 
2137 	ionic_debugfs_add_qcq(lif, qcq);
2138 
2139 	return 0;
2140 }
2141 
2142 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
2143 {
2144 	struct ionic_qcq *qcq = lif->notifyqcq;
2145 	struct device *dev = lif->ionic->dev;
2146 	struct ionic_queue *q = &qcq->q;
2147 	int err;
2148 
2149 	struct ionic_admin_ctx ctx = {
2150 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2151 		.cmd.q_init = {
2152 			.opcode = IONIC_CMD_Q_INIT,
2153 			.lif_index = cpu_to_le16(lif->index),
2154 			.type = q->type,
2155 			.index = cpu_to_le32(q->index),
2156 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
2157 					     IONIC_QINIT_F_ENA),
2158 			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
2159 			.pid = cpu_to_le16(q->pid),
2160 			.ring_size = ilog2(q->num_descs),
2161 			.ring_base = cpu_to_le64(q->base_pa),
2162 		}
2163 	};
2164 
2165 	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
2166 	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
2167 	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
2168 	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
2169 
2170 	err = ionic_adminq_post_wait(lif, &ctx);
2171 	if (err)
2172 		return err;
2173 
2174 	q->hw_type = ctx.comp.q_init.hw_type;
2175 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
2176 	q->dbval = IONIC_DBELL_QID(q->hw_index);
2177 
2178 	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
2179 	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
2180 
2181 	/* preset the callback info */
2182 	q->info[0].cb_arg = lif;
2183 
2184 	qcq->flags |= IONIC_QCQ_F_INITED;
2185 
2186 	ionic_debugfs_add_qcq(lif, qcq);
2187 
2188 	return 0;
2189 }
2190 
2191 static int ionic_station_set(struct ionic_lif *lif)
2192 {
2193 	struct net_device *netdev = lif->netdev;
2194 	struct ionic_admin_ctx ctx = {
2195 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2196 		.cmd.lif_getattr = {
2197 			.opcode = IONIC_CMD_LIF_GETATTR,
2198 			.index = cpu_to_le16(lif->index),
2199 			.attr = IONIC_LIF_ATTR_MAC,
2200 		},
2201 	};
2202 	struct sockaddr addr;
2203 	int err;
2204 
2205 	err = ionic_adminq_post_wait(lif, &ctx);
2206 	if (err)
2207 		return err;
2208 
2209 	if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
2210 		return 0;
2211 
2212 	memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
2213 	addr.sa_family = AF_INET;
2214 	err = eth_prepare_mac_addr_change(netdev, &addr);
2215 	if (err) {
2216 		netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM\n",
2217 			    addr.sa_data);
2218 		return 0;
2219 	}
2220 
2221 	netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
2222 		   netdev->dev_addr);
2223 	ionic_lif_addr(lif, netdev->dev_addr, false);
2224 
2225 	eth_commit_mac_addr_change(netdev, &addr);
2226 	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
2227 		   netdev->dev_addr);
2228 	ionic_lif_addr(lif, netdev->dev_addr, true);
2229 
2230 	return 0;
2231 }
2232 
2233 static int ionic_lif_init(struct ionic_lif *lif)
2234 {
2235 	struct ionic_dev *idev = &lif->ionic->idev;
2236 	struct device *dev = lif->ionic->dev;
2237 	struct ionic_lif_init_comp comp;
2238 	int dbpage_num;
2239 	int err;
2240 
2241 	ionic_debugfs_add_lif(lif);
2242 
2243 	mutex_lock(&lif->ionic->dev_cmd_lock);
2244 	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
2245 	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2246 	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2247 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2248 	if (err)
2249 		return err;
2250 
2251 	lif->hw_index = le16_to_cpu(comp.hw_index);
2252 
2253 	/* now that we have the hw_index we can figure out our doorbell page */
2254 	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
2255 	if (!lif->dbid_count) {
2256 		dev_err(dev, "No doorbell pages, aborting\n");
2257 		return -EINVAL;
2258 	}
2259 
2260 	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
2261 	if (!lif->dbid_inuse) {
2262 		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
2263 		return -ENOMEM;
2264 	}
2265 
2266 	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
2267 	set_bit(0, lif->dbid_inuse);
2268 	lif->kern_pid = 0;
2269 
2270 	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
2271 	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
2272 	if (!lif->kern_dbpage) {
2273 		dev_err(dev, "Cannot map dbpage, aborting\n");
2274 		err = -ENOMEM;
2275 		goto err_out_free_dbid;
2276 	}
2277 
2278 	err = ionic_lif_adminq_init(lif);
2279 	if (err)
2280 		goto err_out_adminq_deinit;
2281 
2282 	if (lif->ionic->nnqs_per_lif) {
2283 		err = ionic_lif_notifyq_init(lif);
2284 		if (err)
2285 			goto err_out_notifyq_deinit;
2286 	}
2287 
2288 	err = ionic_init_nic_features(lif);
2289 	if (err)
2290 		goto err_out_notifyq_deinit;
2291 
2292 	err = ionic_rx_filters_init(lif);
2293 	if (err)
2294 		goto err_out_notifyq_deinit;
2295 
2296 	err = ionic_station_set(lif);
2297 	if (err)
2298 		goto err_out_notifyq_deinit;
2299 
2300 	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
2301 
2302 	set_bit(IONIC_LIF_F_INITED, lif->state);
2303 
2304 	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
2305 
2306 	return 0;
2307 
2308 err_out_notifyq_deinit:
2309 	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2310 err_out_adminq_deinit:
2311 	ionic_lif_qcq_deinit(lif, lif->adminqcq);
2312 	ionic_lif_reset(lif);
2313 	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2314 	lif->kern_dbpage = NULL;
2315 err_out_free_dbid:
2316 	kfree(lif->dbid_inuse);
2317 	lif->dbid_inuse = NULL;
2318 
2319 	return err;
2320 }
2321 
2322 int ionic_lifs_init(struct ionic *ionic)
2323 {
2324 	struct list_head *cur, *tmp;
2325 	struct ionic_lif *lif;
2326 	int err;
2327 
2328 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2329 		lif = list_entry(cur, struct ionic_lif, list);
2330 		err = ionic_lif_init(lif);
2331 		if (err)
2332 			return err;
2333 	}
2334 
2335 	return 0;
2336 }
2337 
2338 static void ionic_lif_notify_work(struct work_struct *ws)
2339 {
2340 }
2341 
2342 static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
2343 {
2344 	struct ionic_admin_ctx ctx = {
2345 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2346 		.cmd.lif_setattr = {
2347 			.opcode = IONIC_CMD_LIF_SETATTR,
2348 			.index = cpu_to_le16(lif->index),
2349 			.attr = IONIC_LIF_ATTR_NAME,
2350 		},
2351 	};
2352 
2353 	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
2354 		sizeof(ctx.cmd.lif_setattr.name));
2355 
2356 	ionic_adminq_post_wait(lif, &ctx);
2357 }
2358 
2359 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
2360 {
2361 	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
2362 		return NULL;
2363 
2364 	return netdev_priv(netdev);
2365 }
2366 
2367 static int ionic_lif_notify(struct notifier_block *nb,
2368 			    unsigned long event, void *info)
2369 {
2370 	struct net_device *ndev = netdev_notifier_info_to_dev(info);
2371 	struct ionic *ionic = container_of(nb, struct ionic, nb);
2372 	struct ionic_lif *lif = ionic_netdev_lif(ndev);
2373 
2374 	if (!lif || lif->ionic != ionic)
2375 		return NOTIFY_DONE;
2376 
2377 	switch (event) {
2378 	case NETDEV_CHANGENAME:
2379 		ionic_lif_set_netdev_info(lif);
2380 		break;
2381 	}
2382 
2383 	return NOTIFY_DONE;
2384 }
2385 
2386 int ionic_lifs_register(struct ionic *ionic)
2387 {
2388 	int err;
2389 
2390 	/* the netdev is not registered on the management device, it is
2391 	 * only used as a vehicle for napi operations on the adminq
2392 	 */
2393 	if (ionic->is_mgmt_nic)
2394 		return 0;
2395 
2396 	INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
2397 
2398 	ionic->nb.notifier_call = ionic_lif_notify;
2399 
2400 	err = register_netdevice_notifier(&ionic->nb);
2401 	if (err)
2402 		ionic->nb.notifier_call = NULL;
2403 
2404 	/* only register LIF0 for now */
2405 	err = register_netdev(ionic->master_lif->netdev);
2406 	if (err) {
2407 		dev_err(ionic->dev, "Cannot register net device, aborting\n");
2408 		return err;
2409 	}
2410 
2411 	ionic_link_status_check_request(ionic->master_lif);
2412 	ionic->master_lif->registered = true;
2413 
2414 	return 0;
2415 }
2416 
2417 void ionic_lifs_unregister(struct ionic *ionic)
2418 {
2419 	if (ionic->nb.notifier_call) {
2420 		unregister_netdevice_notifier(&ionic->nb);
2421 		cancel_work_sync(&ionic->nb_work);
2422 		ionic->nb.notifier_call = NULL;
2423 	}
2424 
2425 	/* There is only one lif ever registered in the
2426 	 * current model, so don't bother searching the
2427 	 * ionic->lif for candidates to unregister
2428 	 */
2429 	if (!ionic->master_lif)
2430 		return;
2431 
2432 	cancel_work_sync(&ionic->master_lif->deferred.work);
2433 	cancel_work_sync(&ionic->master_lif->tx_timeout_work);
2434 	if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
2435 		unregister_netdev(ionic->master_lif->netdev);
2436 }
2437 
2438 int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
2439 		       union ionic_lif_identity *lid)
2440 {
2441 	struct ionic_dev *idev = &ionic->idev;
2442 	size_t sz;
2443 	int err;
2444 
2445 	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
2446 
2447 	mutex_lock(&ionic->dev_cmd_lock);
2448 	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
2449 	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
2450 	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
2451 	mutex_unlock(&ionic->dev_cmd_lock);
2452 	if (err)
2453 		return (err);
2454 
2455 	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
2456 		le64_to_cpu(lid->capabilities));
2457 
2458 	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
2459 		le32_to_cpu(lid->eth.max_ucast_filters));
2460 	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
2461 		le32_to_cpu(lid->eth.max_mcast_filters));
2462 	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
2463 		le64_to_cpu(lid->eth.config.features));
2464 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
2465 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
2466 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
2467 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
2468 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
2469 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
2470 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
2471 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
2472 	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
2473 	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
2474 	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
2475 		le32_to_cpu(lid->eth.config.mtu));
2476 
2477 	return 0;
2478 }
2479 
2480 int ionic_lifs_size(struct ionic *ionic)
2481 {
2482 	struct ionic_identity *ident = &ionic->ident;
2483 	unsigned int nintrs, dev_nintrs;
2484 	union ionic_lif_config *lc;
2485 	unsigned int ntxqs_per_lif;
2486 	unsigned int nrxqs_per_lif;
2487 	unsigned int neqs_per_lif;
2488 	unsigned int nnqs_per_lif;
2489 	unsigned int nxqs, neqs;
2490 	unsigned int min_intrs;
2491 	int err;
2492 
2493 	lc = &ident->lif.eth.config;
2494 	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
2495 	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
2496 	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
2497 	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
2498 	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
2499 
2500 	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
2501 	nxqs = min(nxqs, num_online_cpus());
2502 	neqs = min(neqs_per_lif, num_online_cpus());
2503 
2504 try_again:
2505 	/* interrupt usage:
2506 	 *    1 for master lif adminq/notifyq
2507 	 *    1 for each CPU for master lif TxRx queue pairs
2508 	 *    whatever's left is for RDMA queues
2509 	 */
2510 	nintrs = 1 + nxqs + neqs;
2511 	min_intrs = 2;  /* adminq + 1 TxRx queue pair */
2512 
2513 	if (nintrs > dev_nintrs)
2514 		goto try_fewer;
2515 
2516 	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
2517 	if (err < 0 && err != -ENOSPC) {
2518 		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
2519 		return err;
2520 	}
2521 	if (err == -ENOSPC)
2522 		goto try_fewer;
2523 
2524 	if (err != nintrs) {
2525 		ionic_bus_free_irq_vectors(ionic);
2526 		goto try_fewer;
2527 	}
2528 
2529 	ionic->nnqs_per_lif = nnqs_per_lif;
2530 	ionic->neqs_per_lif = neqs;
2531 	ionic->ntxqs_per_lif = nxqs;
2532 	ionic->nrxqs_per_lif = nxqs;
2533 	ionic->nintrs = nintrs;
2534 
2535 	ionic_debugfs_add_sizes(ionic);
2536 
2537 	return 0;
2538 
2539 try_fewer:
2540 	if (nnqs_per_lif > 1) {
2541 		nnqs_per_lif >>= 1;
2542 		goto try_again;
2543 	}
2544 	if (neqs > 1) {
2545 		neqs >>= 1;
2546 		goto try_again;
2547 	}
2548 	if (nxqs > 1) {
2549 		nxqs >>= 1;
2550 		goto try_again;
2551 	}
2552 	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
2553 	return -ENOSPC;
2554 }
2555