1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3 
4 #include <linux/printk.h>
5 #include <linux/dynamic_debug.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/rtnetlink.h>
9 #include <linux/interrupt.h>
10 #include <linux/pci.h>
11 #include <linux/cpumask.h>
12 
13 #include "ionic.h"
14 #include "ionic_bus.h"
15 #include "ionic_lif.h"
16 #include "ionic_txrx.h"
17 #include "ionic_ethtool.h"
18 #include "ionic_debugfs.h"
19 
20 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
21 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
22 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
23 static void ionic_link_status_check(struct ionic_lif *lif);
24 
25 static void ionic_lif_deferred_work(struct work_struct *work)
26 {
27 	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
28 	struct ionic_deferred *def = &lif->deferred;
29 	struct ionic_deferred_work *w = NULL;
30 
31 	spin_lock_bh(&def->lock);
32 	if (!list_empty(&def->list)) {
33 		w = list_first_entry(&def->list,
34 				     struct ionic_deferred_work, list);
35 		list_del(&w->list);
36 	}
37 	spin_unlock_bh(&def->lock);
38 
39 	if (w) {
40 		switch (w->type) {
41 		case IONIC_DW_TYPE_RX_MODE:
42 			ionic_lif_rx_mode(lif, w->rx_mode);
43 			break;
44 		case IONIC_DW_TYPE_RX_ADDR_ADD:
45 			ionic_lif_addr_add(lif, w->addr);
46 			break;
47 		case IONIC_DW_TYPE_RX_ADDR_DEL:
48 			ionic_lif_addr_del(lif, w->addr);
49 			break;
50 		case IONIC_DW_TYPE_LINK_STATUS:
51 			ionic_link_status_check(lif);
52 			break;
53 		default:
54 			break;
55 		}
56 		kfree(w);
57 		schedule_work(&def->work);
58 	}
59 }
60 
61 static void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
62 				       struct ionic_deferred_work *work)
63 {
64 	spin_lock_bh(&def->lock);
65 	list_add_tail(&work->list, &def->list);
66 	spin_unlock_bh(&def->lock);
67 	schedule_work(&def->work);
68 }
69 
70 static void ionic_link_status_check(struct ionic_lif *lif)
71 {
72 	struct net_device *netdev = lif->netdev;
73 	u16 link_status;
74 	bool link_up;
75 
76 	link_status = le16_to_cpu(lif->info->status.link_status);
77 	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
78 
79 	/* filter out the no-change cases */
80 	if (link_up == netif_carrier_ok(netdev))
81 		goto link_out;
82 
83 	if (link_up) {
84 		netdev_info(netdev, "Link up - %d Gbps\n",
85 			    le32_to_cpu(lif->info->status.link_speed) / 1000);
86 
87 		if (test_bit(IONIC_LIF_UP, lif->state)) {
88 			netif_tx_wake_all_queues(lif->netdev);
89 			netif_carrier_on(netdev);
90 		}
91 	} else {
92 		netdev_info(netdev, "Link down\n");
93 
94 		/* carrier off first to avoid watchdog timeout */
95 		netif_carrier_off(netdev);
96 		if (test_bit(IONIC_LIF_UP, lif->state))
97 			netif_tx_stop_all_queues(netdev);
98 	}
99 
100 link_out:
101 	clear_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state);
102 }
103 
104 static void ionic_link_status_check_request(struct ionic_lif *lif)
105 {
106 	struct ionic_deferred_work *work;
107 
108 	/* we only need one request outstanding at a time */
109 	if (test_and_set_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state))
110 		return;
111 
112 	if (in_interrupt()) {
113 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
114 		if (!work)
115 			return;
116 
117 		work->type = IONIC_DW_TYPE_LINK_STATUS;
118 		ionic_lif_deferred_enqueue(&lif->deferred, work);
119 	} else {
120 		ionic_link_status_check(lif);
121 	}
122 }
123 
124 static irqreturn_t ionic_isr(int irq, void *data)
125 {
126 	struct napi_struct *napi = data;
127 
128 	napi_schedule_irqoff(napi);
129 
130 	return IRQ_HANDLED;
131 }
132 
133 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
134 {
135 	struct ionic_intr_info *intr = &qcq->intr;
136 	struct device *dev = lif->ionic->dev;
137 	struct ionic_queue *q = &qcq->q;
138 	const char *name;
139 
140 	if (lif->registered)
141 		name = lif->netdev->name;
142 	else
143 		name = dev_name(dev);
144 
145 	snprintf(intr->name, sizeof(intr->name),
146 		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
147 
148 	return devm_request_irq(dev, intr->vector, ionic_isr,
149 				0, intr->name, &qcq->napi);
150 }
151 
152 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
153 {
154 	struct ionic *ionic = lif->ionic;
155 	int index;
156 
157 	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
158 	if (index == ionic->nintrs) {
159 		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
160 			    __func__, index, ionic->nintrs);
161 		return -ENOSPC;
162 	}
163 
164 	set_bit(index, ionic->intrs);
165 	ionic_intr_init(&ionic->idev, intr, index);
166 
167 	return 0;
168 }
169 
170 static void ionic_intr_free(struct ionic_lif *lif, int index)
171 {
172 	if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
173 		clear_bit(index, lif->ionic->intrs);
174 }
175 
176 static int ionic_qcq_enable(struct ionic_qcq *qcq)
177 {
178 	struct ionic_queue *q = &qcq->q;
179 	struct ionic_lif *lif = q->lif;
180 	struct ionic_dev *idev;
181 	struct device *dev;
182 
183 	struct ionic_admin_ctx ctx = {
184 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
185 		.cmd.q_control = {
186 			.opcode = IONIC_CMD_Q_CONTROL,
187 			.lif_index = cpu_to_le16(lif->index),
188 			.type = q->type,
189 			.index = cpu_to_le32(q->index),
190 			.oper = IONIC_Q_ENABLE,
191 		},
192 	};
193 
194 	idev = &lif->ionic->idev;
195 	dev = lif->ionic->dev;
196 
197 	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
198 		ctx.cmd.q_control.index, ctx.cmd.q_control.type);
199 
200 	if (qcq->flags & IONIC_QCQ_F_INTR) {
201 		irq_set_affinity_hint(qcq->intr.vector,
202 				      &qcq->intr.affinity_mask);
203 		napi_enable(&qcq->napi);
204 		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
205 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
206 				IONIC_INTR_MASK_CLEAR);
207 	}
208 
209 	return ionic_adminq_post_wait(lif, &ctx);
210 }
211 
212 static int ionic_qcq_disable(struct ionic_qcq *qcq)
213 {
214 	struct ionic_queue *q = &qcq->q;
215 	struct ionic_lif *lif = q->lif;
216 	struct ionic_dev *idev;
217 	struct device *dev;
218 
219 	struct ionic_admin_ctx ctx = {
220 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
221 		.cmd.q_control = {
222 			.opcode = IONIC_CMD_Q_CONTROL,
223 			.lif_index = cpu_to_le16(lif->index),
224 			.type = q->type,
225 			.index = cpu_to_le32(q->index),
226 			.oper = IONIC_Q_DISABLE,
227 		},
228 	};
229 
230 	idev = &lif->ionic->idev;
231 	dev = lif->ionic->dev;
232 
233 	dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
234 		ctx.cmd.q_control.index, ctx.cmd.q_control.type);
235 
236 	if (qcq->flags & IONIC_QCQ_F_INTR) {
237 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
238 				IONIC_INTR_MASK_SET);
239 		synchronize_irq(qcq->intr.vector);
240 		irq_set_affinity_hint(qcq->intr.vector, NULL);
241 		napi_disable(&qcq->napi);
242 	}
243 
244 	return ionic_adminq_post_wait(lif, &ctx);
245 }
246 
247 static void ionic_lif_quiesce(struct ionic_lif *lif)
248 {
249 	struct ionic_admin_ctx ctx = {
250 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
251 		.cmd.lif_setattr = {
252 			.opcode = IONIC_CMD_LIF_SETATTR,
253 			.attr = IONIC_LIF_ATTR_STATE,
254 			.index = lif->index,
255 			.state = IONIC_LIF_DISABLE
256 		},
257 	};
258 
259 	ionic_adminq_post_wait(lif, &ctx);
260 }
261 
262 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
263 {
264 	struct ionic_dev *idev = &lif->ionic->idev;
265 	struct device *dev = lif->ionic->dev;
266 
267 	if (!qcq)
268 		return;
269 
270 	ionic_debugfs_del_qcq(qcq);
271 
272 	if (!(qcq->flags & IONIC_QCQ_F_INITED))
273 		return;
274 
275 	if (qcq->flags & IONIC_QCQ_F_INTR) {
276 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
277 				IONIC_INTR_MASK_SET);
278 		devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
279 		netif_napi_del(&qcq->napi);
280 	}
281 
282 	qcq->flags &= ~IONIC_QCQ_F_INITED;
283 }
284 
285 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
286 {
287 	struct device *dev = lif->ionic->dev;
288 
289 	if (!qcq)
290 		return;
291 
292 	dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
293 	qcq->base = NULL;
294 	qcq->base_pa = 0;
295 
296 	if (qcq->flags & IONIC_QCQ_F_INTR)
297 		ionic_intr_free(lif, qcq->intr.index);
298 
299 	devm_kfree(dev, qcq->cq.info);
300 	qcq->cq.info = NULL;
301 	devm_kfree(dev, qcq->q.info);
302 	qcq->q.info = NULL;
303 	devm_kfree(dev, qcq);
304 }
305 
306 static void ionic_qcqs_free(struct ionic_lif *lif)
307 {
308 	struct device *dev = lif->ionic->dev;
309 	unsigned int i;
310 
311 	if (lif->notifyqcq) {
312 		ionic_qcq_free(lif, lif->notifyqcq);
313 		lif->notifyqcq = NULL;
314 	}
315 
316 	if (lif->adminqcq) {
317 		ionic_qcq_free(lif, lif->adminqcq);
318 		lif->adminqcq = NULL;
319 	}
320 
321 	for (i = 0; i < lif->nxqs; i++)
322 		if (lif->rxqcqs[i].stats)
323 			devm_kfree(dev, lif->rxqcqs[i].stats);
324 
325 	devm_kfree(dev, lif->rxqcqs);
326 	lif->rxqcqs = NULL;
327 
328 	for (i = 0; i < lif->nxqs; i++)
329 		if (lif->txqcqs[i].stats)
330 			devm_kfree(dev, lif->txqcqs[i].stats);
331 
332 	devm_kfree(dev, lif->txqcqs);
333 	lif->txqcqs = NULL;
334 }
335 
336 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
337 				      struct ionic_qcq *n_qcq)
338 {
339 	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
340 		ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index);
341 		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
342 	}
343 
344 	n_qcq->intr.vector = src_qcq->intr.vector;
345 	n_qcq->intr.index = src_qcq->intr.index;
346 }
347 
348 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
349 			   unsigned int index,
350 			   const char *name, unsigned int flags,
351 			   unsigned int num_descs, unsigned int desc_size,
352 			   unsigned int cq_desc_size,
353 			   unsigned int sg_desc_size,
354 			   unsigned int pid, struct ionic_qcq **qcq)
355 {
356 	struct ionic_dev *idev = &lif->ionic->idev;
357 	u32 q_size, cq_size, sg_size, total_size;
358 	struct device *dev = lif->ionic->dev;
359 	void *q_base, *cq_base, *sg_base;
360 	dma_addr_t cq_base_pa = 0;
361 	dma_addr_t sg_base_pa = 0;
362 	dma_addr_t q_base_pa = 0;
363 	struct ionic_qcq *new;
364 	int err;
365 
366 	*qcq = NULL;
367 
368 	q_size  = num_descs * desc_size;
369 	cq_size = num_descs * cq_desc_size;
370 	sg_size = num_descs * sg_desc_size;
371 
372 	total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
373 	/* Note: aligning q_size/cq_size is not enough due to cq_base
374 	 * address aligning as q_base could be not aligned to the page.
375 	 * Adding PAGE_SIZE.
376 	 */
377 	total_size += PAGE_SIZE;
378 	if (flags & IONIC_QCQ_F_SG) {
379 		total_size += ALIGN(sg_size, PAGE_SIZE);
380 		total_size += PAGE_SIZE;
381 	}
382 
383 	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
384 	if (!new) {
385 		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
386 		err = -ENOMEM;
387 		goto err_out;
388 	}
389 
390 	new->flags = flags;
391 
392 	new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
393 				   GFP_KERNEL);
394 	if (!new->q.info) {
395 		netdev_err(lif->netdev, "Cannot allocate queue info\n");
396 		err = -ENOMEM;
397 		goto err_out;
398 	}
399 
400 	new->q.type = type;
401 
402 	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
403 			   desc_size, sg_desc_size, pid);
404 	if (err) {
405 		netdev_err(lif->netdev, "Cannot initialize queue\n");
406 		goto err_out;
407 	}
408 
409 	if (flags & IONIC_QCQ_F_INTR) {
410 		err = ionic_intr_alloc(lif, &new->intr);
411 		if (err) {
412 			netdev_warn(lif->netdev, "no intr for %s: %d\n",
413 				    name, err);
414 			goto err_out;
415 		}
416 
417 		err = ionic_bus_get_irq(lif->ionic, new->intr.index);
418 		if (err < 0) {
419 			netdev_warn(lif->netdev, "no vector for %s: %d\n",
420 				    name, err);
421 			goto err_out_free_intr;
422 		}
423 		new->intr.vector = err;
424 		ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
425 				       IONIC_INTR_MASK_SET);
426 
427 		new->intr.cpu = new->intr.index % num_online_cpus();
428 		if (cpu_online(new->intr.cpu))
429 			cpumask_set_cpu(new->intr.cpu,
430 					&new->intr.affinity_mask);
431 	} else {
432 		new->intr.index = INTR_INDEX_NOT_ASSIGNED;
433 	}
434 
435 	new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
436 				    GFP_KERNEL);
437 	if (!new->cq.info) {
438 		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
439 		err = -ENOMEM;
440 		goto err_out_free_intr;
441 	}
442 
443 	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
444 	if (err) {
445 		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
446 		goto err_out_free_intr;
447 	}
448 
449 	new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
450 				       GFP_KERNEL);
451 	if (!new->base) {
452 		netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
453 		err = -ENOMEM;
454 		goto err_out_free_intr;
455 	}
456 
457 	new->total_size = total_size;
458 
459 	q_base = new->base;
460 	q_base_pa = new->base_pa;
461 
462 	cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
463 	cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);
464 
465 	if (flags & IONIC_QCQ_F_SG) {
466 		sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
467 					PAGE_SIZE);
468 		sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
469 		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
470 	}
471 
472 	ionic_q_map(&new->q, q_base, q_base_pa);
473 	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
474 	ionic_cq_bind(&new->cq, &new->q);
475 
476 	*qcq = new;
477 
478 	return 0;
479 
480 err_out_free_intr:
481 	ionic_intr_free(lif, new->intr.index);
482 err_out:
483 	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
484 	return err;
485 }
486 
487 static int ionic_qcqs_alloc(struct ionic_lif *lif)
488 {
489 	struct device *dev = lif->ionic->dev;
490 	unsigned int q_list_size;
491 	unsigned int flags;
492 	int err;
493 	int i;
494 
495 	flags = IONIC_QCQ_F_INTR;
496 	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
497 			      IONIC_ADMINQ_LENGTH,
498 			      sizeof(struct ionic_admin_cmd),
499 			      sizeof(struct ionic_admin_comp),
500 			      0, lif->kern_pid, &lif->adminqcq);
501 	if (err)
502 		return err;
503 
504 	if (lif->ionic->nnqs_per_lif) {
505 		flags = IONIC_QCQ_F_NOTIFYQ;
506 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
507 				      flags, IONIC_NOTIFYQ_LENGTH,
508 				      sizeof(struct ionic_notifyq_cmd),
509 				      sizeof(union ionic_notifyq_comp),
510 				      0, lif->kern_pid, &lif->notifyqcq);
511 		if (err)
512 			goto err_out_free_adminqcq;
513 
514 		/* Let the notifyq ride on the adminq interrupt */
515 		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
516 	}
517 
518 	q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
519 	err = -ENOMEM;
520 	lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
521 	if (!lif->txqcqs)
522 		goto err_out_free_notifyqcq;
523 	for (i = 0; i < lif->nxqs; i++) {
524 		lif->txqcqs[i].stats = devm_kzalloc(dev,
525 						    sizeof(struct ionic_q_stats),
526 						    GFP_KERNEL);
527 		if (!lif->txqcqs[i].stats)
528 			goto err_out_free_tx_stats;
529 	}
530 
531 	lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
532 	if (!lif->rxqcqs)
533 		goto err_out_free_tx_stats;
534 	for (i = 0; i < lif->nxqs; i++) {
535 		lif->rxqcqs[i].stats = devm_kzalloc(dev,
536 						    sizeof(struct ionic_q_stats),
537 						    GFP_KERNEL);
538 		if (!lif->rxqcqs[i].stats)
539 			goto err_out_free_rx_stats;
540 	}
541 
542 	return 0;
543 
544 err_out_free_rx_stats:
545 	for (i = 0; i < lif->nxqs; i++)
546 		if (lif->rxqcqs[i].stats)
547 			devm_kfree(dev, lif->rxqcqs[i].stats);
548 	devm_kfree(dev, lif->rxqcqs);
549 	lif->rxqcqs = NULL;
550 err_out_free_tx_stats:
551 	for (i = 0; i < lif->nxqs; i++)
552 		if (lif->txqcqs[i].stats)
553 			devm_kfree(dev, lif->txqcqs[i].stats);
554 	devm_kfree(dev, lif->txqcqs);
555 	lif->txqcqs = NULL;
556 err_out_free_notifyqcq:
557 	if (lif->notifyqcq) {
558 		ionic_qcq_free(lif, lif->notifyqcq);
559 		lif->notifyqcq = NULL;
560 	}
561 err_out_free_adminqcq:
562 	ionic_qcq_free(lif, lif->adminqcq);
563 	lif->adminqcq = NULL;
564 
565 	return err;
566 }
567 
568 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
569 {
570 	struct device *dev = lif->ionic->dev;
571 	struct ionic_queue *q = &qcq->q;
572 	struct ionic_cq *cq = &qcq->cq;
573 	struct ionic_admin_ctx ctx = {
574 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
575 		.cmd.q_init = {
576 			.opcode = IONIC_CMD_Q_INIT,
577 			.lif_index = cpu_to_le16(lif->index),
578 			.type = q->type,
579 			.index = cpu_to_le32(q->index),
580 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
581 					     IONIC_QINIT_F_SG),
582 			.intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
583 			.pid = cpu_to_le16(q->pid),
584 			.ring_size = ilog2(q->num_descs),
585 			.ring_base = cpu_to_le64(q->base_pa),
586 			.cq_ring_base = cpu_to_le64(cq->base_pa),
587 			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
588 		},
589 	};
590 	int err;
591 
592 	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
593 	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
594 	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
595 	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
596 
597 	err = ionic_adminq_post_wait(lif, &ctx);
598 	if (err)
599 		return err;
600 
601 	q->hw_type = ctx.comp.q_init.hw_type;
602 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
603 	q->dbval = IONIC_DBELL_QID(q->hw_index);
604 
605 	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
606 	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
607 
608 	qcq->flags |= IONIC_QCQ_F_INITED;
609 
610 	ionic_debugfs_add_qcq(lif, qcq);
611 
612 	return 0;
613 }
614 
615 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
616 {
617 	struct device *dev = lif->ionic->dev;
618 	struct ionic_queue *q = &qcq->q;
619 	struct ionic_cq *cq = &qcq->cq;
620 	struct ionic_admin_ctx ctx = {
621 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
622 		.cmd.q_init = {
623 			.opcode = IONIC_CMD_Q_INIT,
624 			.lif_index = cpu_to_le16(lif->index),
625 			.type = q->type,
626 			.index = cpu_to_le32(q->index),
627 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
628 					     IONIC_QINIT_F_SG),
629 			.intr_index = cpu_to_le16(cq->bound_intr->index),
630 			.pid = cpu_to_le16(q->pid),
631 			.ring_size = ilog2(q->num_descs),
632 			.ring_base = cpu_to_le64(q->base_pa),
633 			.cq_ring_base = cpu_to_le64(cq->base_pa),
634 			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
635 		},
636 	};
637 	int err;
638 
639 	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
640 	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
641 	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
642 	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
643 
644 	err = ionic_adminq_post_wait(lif, &ctx);
645 	if (err)
646 		return err;
647 
648 	q->hw_type = ctx.comp.q_init.hw_type;
649 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
650 	q->dbval = IONIC_DBELL_QID(q->hw_index);
651 
652 	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
653 	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
654 
655 	netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
656 		       NAPI_POLL_WEIGHT);
657 
658 	err = ionic_request_irq(lif, qcq);
659 	if (err) {
660 		netif_napi_del(&qcq->napi);
661 		return err;
662 	}
663 
664 	qcq->flags |= IONIC_QCQ_F_INITED;
665 
666 	ionic_debugfs_add_qcq(lif, qcq);
667 
668 	return 0;
669 }
670 
671 static bool ionic_notifyq_service(struct ionic_cq *cq,
672 				  struct ionic_cq_info *cq_info)
673 {
674 	union ionic_notifyq_comp *comp = cq_info->cq_desc;
675 	struct net_device *netdev;
676 	struct ionic_queue *q;
677 	struct ionic_lif *lif;
678 	u64 eid;
679 
680 	q = cq->bound_q;
681 	lif = q->info[0].cb_arg;
682 	netdev = lif->netdev;
683 	eid = le64_to_cpu(comp->event.eid);
684 
685 	/* Have we run out of new completions to process? */
686 	if (eid <= lif->last_eid)
687 		return false;
688 
689 	lif->last_eid = eid;
690 
691 	dev_dbg(lif->ionic->dev, "notifyq event:\n");
692 	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
693 			 comp, sizeof(*comp), true);
694 
695 	switch (le16_to_cpu(comp->event.ecode)) {
696 	case IONIC_EVENT_LINK_CHANGE:
697 		ionic_link_status_check_request(lif);
698 		break;
699 	case IONIC_EVENT_RESET:
700 		netdev_info(netdev, "Notifyq IONIC_EVENT_RESET eid=%lld\n",
701 			    eid);
702 		netdev_info(netdev, "  reset_code=%d state=%d\n",
703 			    comp->reset.reset_code,
704 			    comp->reset.state);
705 		break;
706 	default:
707 		netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n",
708 			    comp->event.ecode, eid);
709 		break;
710 	}
711 
712 	return true;
713 }
714 
715 static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
716 {
717 	struct ionic_dev *idev = &lif->ionic->idev;
718 	struct ionic_cq *cq = &lif->notifyqcq->cq;
719 	u32 work_done;
720 
721 	work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
722 				     NULL, NULL);
723 	if (work_done)
724 		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
725 				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
726 
727 	return work_done;
728 }
729 
730 static bool ionic_adminq_service(struct ionic_cq *cq,
731 				 struct ionic_cq_info *cq_info)
732 {
733 	struct ionic_admin_comp *comp = cq_info->cq_desc;
734 
735 	if (!color_match(comp->color, cq->done_color))
736 		return false;
737 
738 	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
739 
740 	return true;
741 }
742 
743 static int ionic_adminq_napi(struct napi_struct *napi, int budget)
744 {
745 	struct ionic_lif *lif = napi_to_cq(napi)->lif;
746 	int n_work = 0;
747 	int a_work = 0;
748 
749 	if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
750 		n_work = ionic_notifyq_clean(lif, budget);
751 	a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);
752 
753 	return max(n_work, a_work);
754 }
755 
756 static void ionic_get_stats64(struct net_device *netdev,
757 			      struct rtnl_link_stats64 *ns)
758 {
759 	struct ionic_lif *lif = netdev_priv(netdev);
760 	struct ionic_lif_stats *ls;
761 
762 	memset(ns, 0, sizeof(*ns));
763 	ls = &lif->info->stats;
764 
765 	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
766 			 le64_to_cpu(ls->rx_mcast_packets) +
767 			 le64_to_cpu(ls->rx_bcast_packets);
768 
769 	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
770 			 le64_to_cpu(ls->tx_mcast_packets) +
771 			 le64_to_cpu(ls->tx_bcast_packets);
772 
773 	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
774 		       le64_to_cpu(ls->rx_mcast_bytes) +
775 		       le64_to_cpu(ls->rx_bcast_bytes);
776 
777 	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
778 		       le64_to_cpu(ls->tx_mcast_bytes) +
779 		       le64_to_cpu(ls->tx_bcast_bytes);
780 
781 	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
782 			 le64_to_cpu(ls->rx_mcast_drop_packets) +
783 			 le64_to_cpu(ls->rx_bcast_drop_packets);
784 
785 	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
786 			 le64_to_cpu(ls->tx_mcast_drop_packets) +
787 			 le64_to_cpu(ls->tx_bcast_drop_packets);
788 
789 	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
790 
791 	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
792 
793 	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
794 			       le64_to_cpu(ls->rx_queue_disabled) +
795 			       le64_to_cpu(ls->rx_desc_fetch_error) +
796 			       le64_to_cpu(ls->rx_desc_data_error);
797 
798 	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
799 				le64_to_cpu(ls->tx_queue_disabled) +
800 				le64_to_cpu(ls->tx_desc_fetch_error) +
801 				le64_to_cpu(ls->tx_desc_data_error);
802 
803 	ns->rx_errors = ns->rx_over_errors +
804 			ns->rx_missed_errors;
805 
806 	ns->tx_errors = ns->tx_aborted_errors;
807 }
808 
809 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
810 {
811 	struct ionic_admin_ctx ctx = {
812 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
813 		.cmd.rx_filter_add = {
814 			.opcode = IONIC_CMD_RX_FILTER_ADD,
815 			.lif_index = cpu_to_le16(lif->index),
816 			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
817 		},
818 	};
819 	struct ionic_rx_filter *f;
820 	int err;
821 
822 	/* don't bother if we already have it */
823 	spin_lock_bh(&lif->rx_filters.lock);
824 	f = ionic_rx_filter_by_addr(lif, addr);
825 	spin_unlock_bh(&lif->rx_filters.lock);
826 	if (f)
827 		return 0;
828 
829 	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
830 		   ctx.comp.rx_filter_add.filter_id);
831 
832 	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
833 	err = ionic_adminq_post_wait(lif, &ctx);
834 	if (err)
835 		return err;
836 
837 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
838 }
839 
840 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
841 {
842 	struct ionic_admin_ctx ctx = {
843 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
844 		.cmd.rx_filter_del = {
845 			.opcode = IONIC_CMD_RX_FILTER_DEL,
846 			.lif_index = cpu_to_le16(lif->index),
847 		},
848 	};
849 	struct ionic_rx_filter *f;
850 	int err;
851 
852 	spin_lock_bh(&lif->rx_filters.lock);
853 	f = ionic_rx_filter_by_addr(lif, addr);
854 	if (!f) {
855 		spin_unlock_bh(&lif->rx_filters.lock);
856 		return -ENOENT;
857 	}
858 
859 	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
860 	ionic_rx_filter_free(lif, f);
861 	spin_unlock_bh(&lif->rx_filters.lock);
862 
863 	err = ionic_adminq_post_wait(lif, &ctx);
864 	if (err)
865 		return err;
866 
867 	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
868 		   ctx.cmd.rx_filter_del.filter_id);
869 
870 	return 0;
871 }
872 
873 static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
874 {
875 	struct ionic *ionic = lif->ionic;
876 	struct ionic_deferred_work *work;
877 	unsigned int nmfilters;
878 	unsigned int nufilters;
879 
880 	if (add) {
881 		/* Do we have space for this filter?  We test the counters
882 		 * here before checking the need for deferral so that we
883 		 * can return an overflow error to the stack.
884 		 */
885 		nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
886 		nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);
887 
888 		if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
889 			lif->nmcast++;
890 		else if (!is_multicast_ether_addr(addr) &&
891 			 lif->nucast < nufilters)
892 			lif->nucast++;
893 		else
894 			return -ENOSPC;
895 	} else {
896 		if (is_multicast_ether_addr(addr) && lif->nmcast)
897 			lif->nmcast--;
898 		else if (!is_multicast_ether_addr(addr) && lif->nucast)
899 			lif->nucast--;
900 	}
901 
902 	if (in_interrupt()) {
903 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
904 		if (!work) {
905 			netdev_err(lif->netdev, "%s OOM\n", __func__);
906 			return -ENOMEM;
907 		}
908 		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
909 				   IONIC_DW_TYPE_RX_ADDR_DEL;
910 		memcpy(work->addr, addr, ETH_ALEN);
911 		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
912 			   add ? "add" : "del", addr);
913 		ionic_lif_deferred_enqueue(&lif->deferred, work);
914 	} else {
915 		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
916 			   add ? "add" : "del", addr);
917 		if (add)
918 			return ionic_lif_addr_add(lif, addr);
919 		else
920 			return ionic_lif_addr_del(lif, addr);
921 	}
922 
923 	return 0;
924 }
925 
926 static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
927 {
928 	return ionic_lif_addr(netdev_priv(netdev), addr, true);
929 }
930 
931 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
932 {
933 	return ionic_lif_addr(netdev_priv(netdev), addr, false);
934 }
935 
936 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
937 {
938 	struct ionic_admin_ctx ctx = {
939 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
940 		.cmd.rx_mode_set = {
941 			.opcode = IONIC_CMD_RX_MODE_SET,
942 			.lif_index = cpu_to_le16(lif->index),
943 			.rx_mode = cpu_to_le16(rx_mode),
944 		},
945 	};
946 	char buf[128];
947 	int err;
948 	int i;
949 #define REMAIN(__x) (sizeof(buf) - (__x))
950 
951 	i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
952 		     lif->rx_mode, rx_mode);
953 	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
954 		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
955 	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
956 		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
957 	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
958 		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
959 	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
960 		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
961 	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
962 		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
963 	netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
964 
965 	err = ionic_adminq_post_wait(lif, &ctx);
966 	if (err)
967 		netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
968 			    rx_mode, err);
969 	else
970 		lif->rx_mode = rx_mode;
971 }
972 
973 static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
974 {
975 	struct ionic_deferred_work *work;
976 
977 	if (in_interrupt()) {
978 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
979 		if (!work) {
980 			netdev_err(lif->netdev, "%s OOM\n", __func__);
981 			return;
982 		}
983 		work->type = IONIC_DW_TYPE_RX_MODE;
984 		work->rx_mode = rx_mode;
985 		netdev_dbg(lif->netdev, "deferred: rx_mode\n");
986 		ionic_lif_deferred_enqueue(&lif->deferred, work);
987 	} else {
988 		ionic_lif_rx_mode(lif, rx_mode);
989 	}
990 }
991 
992 static void ionic_set_rx_mode(struct net_device *netdev)
993 {
994 	struct ionic_lif *lif = netdev_priv(netdev);
995 	struct ionic_identity *ident;
996 	unsigned int nfilters;
997 	unsigned int rx_mode;
998 
999 	ident = &lif->ionic->ident;
1000 
1001 	rx_mode = IONIC_RX_MODE_F_UNICAST;
1002 	rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
1003 	rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
1004 	rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
1005 	rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
1006 
1007 	/* sync unicast addresses
1008 	 * next check to see if we're in an overflow state
1009 	 *    if so, we track that we overflowed and enable NIC PROMISC
1010 	 *    else if the overflow is set and not needed
1011 	 *       we remove our overflow flag and check the netdev flags
1012 	 *       to see if we can disable NIC PROMISC
1013 	 */
1014 	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
1015 	nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
1016 	if (netdev_uc_count(netdev) + 1 > nfilters) {
1017 		rx_mode |= IONIC_RX_MODE_F_PROMISC;
1018 		lif->uc_overflow = true;
1019 	} else if (lif->uc_overflow) {
1020 		lif->uc_overflow = false;
1021 		if (!(netdev->flags & IFF_PROMISC))
1022 			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
1023 	}
1024 
1025 	/* same for multicast */
1026 	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
1027 	nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
1028 	if (netdev_mc_count(netdev) > nfilters) {
1029 		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
1030 		lif->mc_overflow = true;
1031 	} else if (lif->mc_overflow) {
1032 		lif->mc_overflow = false;
1033 		if (!(netdev->flags & IFF_ALLMULTI))
1034 			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
1035 	}
1036 
1037 	if (lif->rx_mode != rx_mode)
1038 		_ionic_lif_rx_mode(lif, rx_mode);
1039 }
1040 
1041 static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1042 {
1043 	u64 wanted = 0;
1044 
1045 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
1046 		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1047 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1048 		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1049 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1050 		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1051 	if (features & NETIF_F_RXHASH)
1052 		wanted |= IONIC_ETH_HW_RX_HASH;
1053 	if (features & NETIF_F_RXCSUM)
1054 		wanted |= IONIC_ETH_HW_RX_CSUM;
1055 	if (features & NETIF_F_SG)
1056 		wanted |= IONIC_ETH_HW_TX_SG;
1057 	if (features & NETIF_F_HW_CSUM)
1058 		wanted |= IONIC_ETH_HW_TX_CSUM;
1059 	if (features & NETIF_F_TSO)
1060 		wanted |= IONIC_ETH_HW_TSO;
1061 	if (features & NETIF_F_TSO6)
1062 		wanted |= IONIC_ETH_HW_TSO_IPV6;
1063 	if (features & NETIF_F_TSO_ECN)
1064 		wanted |= IONIC_ETH_HW_TSO_ECN;
1065 	if (features & NETIF_F_GSO_GRE)
1066 		wanted |= IONIC_ETH_HW_TSO_GRE;
1067 	if (features & NETIF_F_GSO_GRE_CSUM)
1068 		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1069 	if (features & NETIF_F_GSO_IPXIP4)
1070 		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1071 	if (features & NETIF_F_GSO_IPXIP6)
1072 		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1073 	if (features & NETIF_F_GSO_UDP_TUNNEL)
1074 		wanted |= IONIC_ETH_HW_TSO_UDP;
1075 	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1076 		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1077 
1078 	return cpu_to_le64(wanted);
1079 }
1080 
1081 static int ionic_set_nic_features(struct ionic_lif *lif,
1082 				  netdev_features_t features)
1083 {
1084 	struct device *dev = lif->ionic->dev;
1085 	struct ionic_admin_ctx ctx = {
1086 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1087 		.cmd.lif_setattr = {
1088 			.opcode = IONIC_CMD_LIF_SETATTR,
1089 			.index = cpu_to_le16(lif->index),
1090 			.attr = IONIC_LIF_ATTR_FEATURES,
1091 		},
1092 	};
1093 	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
1094 			 IONIC_ETH_HW_VLAN_RX_STRIP |
1095 			 IONIC_ETH_HW_VLAN_RX_FILTER;
1096 	int err;
1097 
1098 	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
1099 	err = ionic_adminq_post_wait(lif, &ctx);
1100 	if (err)
1101 		return err;
1102 
1103 	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
1104 				       ctx.comp.lif_setattr.features);
1105 
1106 	if ((vlan_flags & features) &&
1107 	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
1108 		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
1109 
1110 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1111 		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
1112 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1113 		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
1114 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1115 		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
1116 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1117 		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
1118 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1119 		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
1120 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1121 		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
1122 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1123 		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
1124 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1125 		dev_dbg(dev, "feature ETH_HW_TSO\n");
1126 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1127 		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
1128 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1129 		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
1130 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1131 		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
1132 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1133 		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
1134 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1135 		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
1136 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1137 		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
1138 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1139 		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
1140 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1141 		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
1142 
1143 	return 0;
1144 }
1145 
1146 static int ionic_init_nic_features(struct ionic_lif *lif)
1147 {
1148 	struct net_device *netdev = lif->netdev;
1149 	netdev_features_t features;
1150 	int err;
1151 
1152 	/* set up what we expect to support by default */
1153 	features = NETIF_F_HW_VLAN_CTAG_TX |
1154 		   NETIF_F_HW_VLAN_CTAG_RX |
1155 		   NETIF_F_HW_VLAN_CTAG_FILTER |
1156 		   NETIF_F_RXHASH |
1157 		   NETIF_F_SG |
1158 		   NETIF_F_HW_CSUM |
1159 		   NETIF_F_RXCSUM |
1160 		   NETIF_F_TSO |
1161 		   NETIF_F_TSO6 |
1162 		   NETIF_F_TSO_ECN;
1163 
1164 	err = ionic_set_nic_features(lif, features);
1165 	if (err)
1166 		return err;
1167 
1168 	/* tell the netdev what we actually can support */
1169 	netdev->features |= NETIF_F_HIGHDMA;
1170 
1171 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1172 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
1173 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1174 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1175 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1176 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1177 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1178 		netdev->hw_features |= NETIF_F_RXHASH;
1179 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1180 		netdev->hw_features |= NETIF_F_SG;
1181 
1182 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1183 		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
1184 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1185 		netdev->hw_enc_features |= NETIF_F_RXCSUM;
1186 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1187 		netdev->hw_enc_features |= NETIF_F_TSO;
1188 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1189 		netdev->hw_enc_features |= NETIF_F_TSO6;
1190 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1191 		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
1192 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1193 		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
1194 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1195 		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
1196 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1197 		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
1198 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1199 		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
1200 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1201 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1202 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1203 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1204 
1205 	netdev->hw_features |= netdev->hw_enc_features;
1206 	netdev->features |= netdev->hw_features;
1207 
1208 	netdev->priv_flags |= IFF_UNICAST_FLT;
1209 
1210 	return 0;
1211 }
1212 
1213 static int ionic_set_features(struct net_device *netdev,
1214 			      netdev_features_t features)
1215 {
1216 	struct ionic_lif *lif = netdev_priv(netdev);
1217 	int err;
1218 
1219 	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1220 		   __func__, (u64)lif->netdev->features, (u64)features);
1221 
1222 	err = ionic_set_nic_features(lif, features);
1223 
1224 	return err;
1225 }
1226 
1227 static int ionic_set_mac_address(struct net_device *netdev, void *sa)
1228 {
1229 	struct sockaddr *addr = sa;
1230 	u8 *mac;
1231 	int err;
1232 
1233 	mac = (u8 *)addr->sa_data;
1234 	if (ether_addr_equal(netdev->dev_addr, mac))
1235 		return 0;
1236 
1237 	err = eth_prepare_mac_addr_change(netdev, addr);
1238 	if (err)
1239 		return err;
1240 
1241 	if (!is_zero_ether_addr(netdev->dev_addr)) {
1242 		netdev_info(netdev, "deleting mac addr %pM\n",
1243 			    netdev->dev_addr);
1244 		ionic_addr_del(netdev, netdev->dev_addr);
1245 	}
1246 
1247 	eth_commit_mac_addr_change(netdev, addr);
1248 	netdev_info(netdev, "updating mac addr %pM\n", mac);
1249 
1250 	return ionic_addr_add(netdev, mac);
1251 }
1252 
1253 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1254 {
1255 	struct ionic_lif *lif = netdev_priv(netdev);
1256 	struct ionic_admin_ctx ctx = {
1257 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1258 		.cmd.lif_setattr = {
1259 			.opcode = IONIC_CMD_LIF_SETATTR,
1260 			.index = cpu_to_le16(lif->index),
1261 			.attr = IONIC_LIF_ATTR_MTU,
1262 			.mtu = cpu_to_le32(new_mtu),
1263 		},
1264 	};
1265 	int err;
1266 
1267 	err = ionic_adminq_post_wait(lif, &ctx);
1268 	if (err)
1269 		return err;
1270 
1271 	netdev->mtu = new_mtu;
1272 	err = ionic_reset_queues(lif);
1273 
1274 	return err;
1275 }
1276 
1277 static void ionic_tx_timeout_work(struct work_struct *ws)
1278 {
1279 	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1280 
1281 	netdev_info(lif->netdev, "Tx Timeout recovery\n");
1282 
1283 	rtnl_lock();
1284 	ionic_reset_queues(lif);
1285 	rtnl_unlock();
1286 }
1287 
1288 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1289 {
1290 	struct ionic_lif *lif = netdev_priv(netdev);
1291 
1292 	schedule_work(&lif->tx_timeout_work);
1293 }
1294 
1295 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1296 				 u16 vid)
1297 {
1298 	struct ionic_lif *lif = netdev_priv(netdev);
1299 	struct ionic_admin_ctx ctx = {
1300 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1301 		.cmd.rx_filter_add = {
1302 			.opcode = IONIC_CMD_RX_FILTER_ADD,
1303 			.lif_index = cpu_to_le16(lif->index),
1304 			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
1305 			.vlan.vlan = cpu_to_le16(vid),
1306 		},
1307 	};
1308 	int err;
1309 
1310 	err = ionic_adminq_post_wait(lif, &ctx);
1311 	if (err)
1312 		return err;
1313 
1314 	netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
1315 		   ctx.comp.rx_filter_add.filter_id);
1316 
1317 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
1318 }
1319 
1320 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1321 				  u16 vid)
1322 {
1323 	struct ionic_lif *lif = netdev_priv(netdev);
1324 	struct ionic_admin_ctx ctx = {
1325 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1326 		.cmd.rx_filter_del = {
1327 			.opcode = IONIC_CMD_RX_FILTER_DEL,
1328 			.lif_index = cpu_to_le16(lif->index),
1329 		},
1330 	};
1331 	struct ionic_rx_filter *f;
1332 
1333 	spin_lock_bh(&lif->rx_filters.lock);
1334 
1335 	f = ionic_rx_filter_by_vlan(lif, vid);
1336 	if (!f) {
1337 		spin_unlock_bh(&lif->rx_filters.lock);
1338 		return -ENOENT;
1339 	}
1340 
1341 	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
1342 		   le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));
1343 
1344 	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
1345 	ionic_rx_filter_free(lif, f);
1346 	spin_unlock_bh(&lif->rx_filters.lock);
1347 
1348 	return ionic_adminq_post_wait(lif, &ctx);
1349 }
1350 
1351 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1352 			 const u8 *key, const u32 *indir)
1353 {
1354 	struct ionic_admin_ctx ctx = {
1355 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1356 		.cmd.lif_setattr = {
1357 			.opcode = IONIC_CMD_LIF_SETATTR,
1358 			.attr = IONIC_LIF_ATTR_RSS,
1359 			.rss.types = cpu_to_le16(types),
1360 			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1361 		},
1362 	};
1363 	unsigned int i, tbl_sz;
1364 
1365 	lif->rss_types = types;
1366 
1367 	if (key)
1368 		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1369 
1370 	if (indir) {
1371 		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1372 		for (i = 0; i < tbl_sz; i++)
1373 			lif->rss_ind_tbl[i] = indir[i];
1374 	}
1375 
1376 	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1377 	       IONIC_RSS_HASH_KEY_SIZE);
1378 
1379 	return ionic_adminq_post_wait(lif, &ctx);
1380 }
1381 
1382 static int ionic_lif_rss_init(struct ionic_lif *lif)
1383 {
1384 	unsigned int tbl_sz;
1385 	unsigned int i;
1386 
1387 	lif->rss_types = IONIC_RSS_TYPE_IPV4     |
1388 			 IONIC_RSS_TYPE_IPV4_TCP |
1389 			 IONIC_RSS_TYPE_IPV4_UDP |
1390 			 IONIC_RSS_TYPE_IPV6     |
1391 			 IONIC_RSS_TYPE_IPV6_TCP |
1392 			 IONIC_RSS_TYPE_IPV6_UDP;
1393 
1394 	/* Fill indirection table with 'default' values */
1395 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1396 	for (i = 0; i < tbl_sz; i++)
1397 		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1398 
1399 	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1400 }
1401 
1402 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1403 {
1404 	int tbl_sz;
1405 
1406 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1407 	memset(lif->rss_ind_tbl, 0, tbl_sz);
1408 	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1409 
1410 	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1411 }
1412 
1413 static void ionic_txrx_disable(struct ionic_lif *lif)
1414 {
1415 	unsigned int i;
1416 
1417 	for (i = 0; i < lif->nxqs; i++) {
1418 		ionic_qcq_disable(lif->txqcqs[i].qcq);
1419 		ionic_qcq_disable(lif->rxqcqs[i].qcq);
1420 	}
1421 }
1422 
1423 static void ionic_txrx_deinit(struct ionic_lif *lif)
1424 {
1425 	unsigned int i;
1426 
1427 	for (i = 0; i < lif->nxqs; i++) {
1428 		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1429 		ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
1430 
1431 		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1432 		ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
1433 		ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
1434 	}
1435 }
1436 
1437 static void ionic_txrx_free(struct ionic_lif *lif)
1438 {
1439 	unsigned int i;
1440 
1441 	for (i = 0; i < lif->nxqs; i++) {
1442 		ionic_qcq_free(lif, lif->txqcqs[i].qcq);
1443 		lif->txqcqs[i].qcq = NULL;
1444 
1445 		ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
1446 		lif->rxqcqs[i].qcq = NULL;
1447 	}
1448 }
1449 
1450 static int ionic_txrx_alloc(struct ionic_lif *lif)
1451 {
1452 	unsigned int flags;
1453 	unsigned int i;
1454 	int err = 0;
1455 
1456 	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
1457 	for (i = 0; i < lif->nxqs; i++) {
1458 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
1459 				      lif->ntxq_descs,
1460 				      sizeof(struct ionic_txq_desc),
1461 				      sizeof(struct ionic_txq_comp),
1462 				      sizeof(struct ionic_txq_sg_desc),
1463 				      lif->kern_pid, &lif->txqcqs[i].qcq);
1464 		if (err)
1465 			goto err_out;
1466 
1467 		lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
1468 	}
1469 
1470 	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
1471 	for (i = 0; i < lif->nxqs; i++) {
1472 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
1473 				      lif->nrxq_descs,
1474 				      sizeof(struct ionic_rxq_desc),
1475 				      sizeof(struct ionic_rxq_comp),
1476 				      sizeof(struct ionic_rxq_sg_desc),
1477 				      lif->kern_pid, &lif->rxqcqs[i].qcq);
1478 		if (err)
1479 			goto err_out;
1480 
1481 		lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
1482 
1483 		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
1484 				     lif->rxqcqs[i].qcq->intr.index,
1485 				     lif->rx_coalesce_hw);
1486 		ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
1487 					  lif->txqcqs[i].qcq);
1488 	}
1489 
1490 	return 0;
1491 
1492 err_out:
1493 	ionic_txrx_free(lif);
1494 
1495 	return err;
1496 }
1497 
1498 static int ionic_txrx_init(struct ionic_lif *lif)
1499 {
1500 	unsigned int i;
1501 	int err;
1502 
1503 	for (i = 0; i < lif->nxqs; i++) {
1504 		err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
1505 		if (err)
1506 			goto err_out;
1507 
1508 		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
1509 		if (err) {
1510 			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1511 			goto err_out;
1512 		}
1513 	}
1514 
1515 	if (lif->netdev->features & NETIF_F_RXHASH)
1516 		ionic_lif_rss_init(lif);
1517 
1518 	ionic_set_rx_mode(lif->netdev);
1519 
1520 	return 0;
1521 
1522 err_out:
1523 	while (i--) {
1524 		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1525 		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1526 	}
1527 
1528 	return err;
1529 }
1530 
1531 static int ionic_txrx_enable(struct ionic_lif *lif)
1532 {
1533 	int i, err;
1534 
1535 	for (i = 0; i < lif->nxqs; i++) {
1536 		err = ionic_qcq_enable(lif->txqcqs[i].qcq);
1537 		if (err)
1538 			goto err_out;
1539 
1540 		ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
1541 		err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
1542 		if (err) {
1543 			ionic_qcq_disable(lif->txqcqs[i].qcq);
1544 			goto err_out;
1545 		}
1546 	}
1547 
1548 	return 0;
1549 
1550 err_out:
1551 	while (i--) {
1552 		ionic_qcq_disable(lif->rxqcqs[i].qcq);
1553 		ionic_qcq_disable(lif->txqcqs[i].qcq);
1554 	}
1555 
1556 	return err;
1557 }
1558 
1559 int ionic_open(struct net_device *netdev)
1560 {
1561 	struct ionic_lif *lif = netdev_priv(netdev);
1562 	int err;
1563 
1564 	netif_carrier_off(netdev);
1565 
1566 	err = ionic_txrx_alloc(lif);
1567 	if (err)
1568 		return err;
1569 
1570 	err = ionic_txrx_init(lif);
1571 	if (err)
1572 		goto err_txrx_free;
1573 
1574 	err = ionic_txrx_enable(lif);
1575 	if (err)
1576 		goto err_txrx_deinit;
1577 
1578 	netif_set_real_num_tx_queues(netdev, lif->nxqs);
1579 	netif_set_real_num_rx_queues(netdev, lif->nxqs);
1580 
1581 	set_bit(IONIC_LIF_UP, lif->state);
1582 
1583 	ionic_link_status_check_request(lif);
1584 	if (netif_carrier_ok(netdev))
1585 		netif_tx_wake_all_queues(netdev);
1586 
1587 	return 0;
1588 
1589 err_txrx_deinit:
1590 	ionic_txrx_deinit(lif);
1591 err_txrx_free:
1592 	ionic_txrx_free(lif);
1593 	return err;
1594 }
1595 
1596 int ionic_stop(struct net_device *netdev)
1597 {
1598 	struct ionic_lif *lif = netdev_priv(netdev);
1599 	int err = 0;
1600 
1601 	if (!test_bit(IONIC_LIF_UP, lif->state)) {
1602 		dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n",
1603 			__func__, lif->name);
1604 		return 0;
1605 	}
1606 	dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name);
1607 	clear_bit(IONIC_LIF_UP, lif->state);
1608 
1609 	/* carrier off before disabling queues to avoid watchdog timeout */
1610 	netif_carrier_off(netdev);
1611 	netif_tx_stop_all_queues(netdev);
1612 	netif_tx_disable(netdev);
1613 
1614 	ionic_txrx_disable(lif);
1615 	ionic_lif_quiesce(lif);
1616 	ionic_txrx_deinit(lif);
1617 	ionic_txrx_free(lif);
1618 
1619 	return err;
1620 }
1621 
1622 static int ionic_get_vf_config(struct net_device *netdev,
1623 			       int vf, struct ifla_vf_info *ivf)
1624 {
1625 	struct ionic_lif *lif = netdev_priv(netdev);
1626 	struct ionic *ionic = lif->ionic;
1627 	int ret = 0;
1628 
1629 	down_read(&ionic->vf_op_lock);
1630 
1631 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1632 		ret = -EINVAL;
1633 	} else {
1634 		ivf->vf           = vf;
1635 		ivf->vlan         = ionic->vfs[vf].vlanid;
1636 		ivf->qos	  = 0;
1637 		ivf->spoofchk     = ionic->vfs[vf].spoofchk;
1638 		ivf->linkstate    = ionic->vfs[vf].linkstate;
1639 		ivf->max_tx_rate  = ionic->vfs[vf].maxrate;
1640 		ivf->trusted      = ionic->vfs[vf].trusted;
1641 		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
1642 	}
1643 
1644 	up_read(&ionic->vf_op_lock);
1645 	return ret;
1646 }
1647 
1648 static int ionic_get_vf_stats(struct net_device *netdev, int vf,
1649 			      struct ifla_vf_stats *vf_stats)
1650 {
1651 	struct ionic_lif *lif = netdev_priv(netdev);
1652 	struct ionic *ionic = lif->ionic;
1653 	struct ionic_lif_stats *vs;
1654 	int ret = 0;
1655 
1656 	down_read(&ionic->vf_op_lock);
1657 
1658 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1659 		ret = -EINVAL;
1660 	} else {
1661 		memset(vf_stats, 0, sizeof(*vf_stats));
1662 		vs = &ionic->vfs[vf].stats;
1663 
1664 		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
1665 		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
1666 		vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
1667 		vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
1668 		vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
1669 		vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
1670 		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
1671 				       le64_to_cpu(vs->rx_mcast_drop_packets) +
1672 				       le64_to_cpu(vs->rx_bcast_drop_packets);
1673 		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
1674 				       le64_to_cpu(vs->tx_mcast_drop_packets) +
1675 				       le64_to_cpu(vs->tx_bcast_drop_packets);
1676 	}
1677 
1678 	up_read(&ionic->vf_op_lock);
1679 	return ret;
1680 }
1681 
1682 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1683 {
1684 	struct ionic_lif *lif = netdev_priv(netdev);
1685 	struct ionic *ionic = lif->ionic;
1686 	int ret;
1687 
1688 	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
1689 		return -EINVAL;
1690 
1691 	down_read(&ionic->vf_op_lock);
1692 
1693 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1694 		ret = -EINVAL;
1695 	} else {
1696 		ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
1697 		if (!ret)
1698 			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
1699 	}
1700 
1701 	up_read(&ionic->vf_op_lock);
1702 	return ret;
1703 }
1704 
1705 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1706 			     u8 qos, __be16 proto)
1707 {
1708 	struct ionic_lif *lif = netdev_priv(netdev);
1709 	struct ionic *ionic = lif->ionic;
1710 	int ret;
1711 
1712 	/* until someday when we support qos */
1713 	if (qos)
1714 		return -EINVAL;
1715 
1716 	if (vlan > 4095)
1717 		return -EINVAL;
1718 
1719 	if (proto != htons(ETH_P_8021Q))
1720 		return -EPROTONOSUPPORT;
1721 
1722 	down_read(&ionic->vf_op_lock);
1723 
1724 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1725 		ret = -EINVAL;
1726 	} else {
1727 		ret = ionic_set_vf_config(ionic, vf,
1728 					  IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
1729 		if (!ret)
1730 			ionic->vfs[vf].vlanid = vlan;
1731 	}
1732 
1733 	up_read(&ionic->vf_op_lock);
1734 	return ret;
1735 }
1736 
1737 static int ionic_set_vf_rate(struct net_device *netdev, int vf,
1738 			     int tx_min, int tx_max)
1739 {
1740 	struct ionic_lif *lif = netdev_priv(netdev);
1741 	struct ionic *ionic = lif->ionic;
1742 	int ret;
1743 
1744 	/* setting the min just seems silly */
1745 	if (tx_min)
1746 		return -EINVAL;
1747 
1748 	down_write(&ionic->vf_op_lock);
1749 
1750 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1751 		ret = -EINVAL;
1752 	} else {
1753 		ret = ionic_set_vf_config(ionic, vf,
1754 					  IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
1755 		if (!ret)
1756 			lif->ionic->vfs[vf].maxrate = tx_max;
1757 	}
1758 
1759 	up_write(&ionic->vf_op_lock);
1760 	return ret;
1761 }
1762 
1763 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
1764 {
1765 	struct ionic_lif *lif = netdev_priv(netdev);
1766 	struct ionic *ionic = lif->ionic;
1767 	u8 data = set;  /* convert to u8 for config */
1768 	int ret;
1769 
1770 	down_write(&ionic->vf_op_lock);
1771 
1772 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1773 		ret = -EINVAL;
1774 	} else {
1775 		ret = ionic_set_vf_config(ionic, vf,
1776 					  IONIC_VF_ATTR_SPOOFCHK, &data);
1777 		if (!ret)
1778 			ionic->vfs[vf].spoofchk = data;
1779 	}
1780 
1781 	up_write(&ionic->vf_op_lock);
1782 	return ret;
1783 }
1784 
1785 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
1786 {
1787 	struct ionic_lif *lif = netdev_priv(netdev);
1788 	struct ionic *ionic = lif->ionic;
1789 	u8 data = set;  /* convert to u8 for config */
1790 	int ret;
1791 
1792 	down_write(&ionic->vf_op_lock);
1793 
1794 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1795 		ret = -EINVAL;
1796 	} else {
1797 		ret = ionic_set_vf_config(ionic, vf,
1798 					  IONIC_VF_ATTR_TRUST, &data);
1799 		if (!ret)
1800 			ionic->vfs[vf].trusted = data;
1801 	}
1802 
1803 	up_write(&ionic->vf_op_lock);
1804 	return ret;
1805 }
1806 
1807 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
1808 {
1809 	struct ionic_lif *lif = netdev_priv(netdev);
1810 	struct ionic *ionic = lif->ionic;
1811 	u8 data;
1812 	int ret;
1813 
1814 	switch (set) {
1815 	case IFLA_VF_LINK_STATE_ENABLE:
1816 		data = IONIC_VF_LINK_STATUS_UP;
1817 		break;
1818 	case IFLA_VF_LINK_STATE_DISABLE:
1819 		data = IONIC_VF_LINK_STATUS_DOWN;
1820 		break;
1821 	case IFLA_VF_LINK_STATE_AUTO:
1822 		data = IONIC_VF_LINK_STATUS_AUTO;
1823 		break;
1824 	default:
1825 		return -EINVAL;
1826 	}
1827 
1828 	down_write(&ionic->vf_op_lock);
1829 
1830 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1831 		ret = -EINVAL;
1832 	} else {
1833 		ret = ionic_set_vf_config(ionic, vf,
1834 					  IONIC_VF_ATTR_LINKSTATE, &data);
1835 		if (!ret)
1836 			ionic->vfs[vf].linkstate = set;
1837 	}
1838 
1839 	up_write(&ionic->vf_op_lock);
1840 	return ret;
1841 }
1842 
1843 static const struct net_device_ops ionic_netdev_ops = {
1844 	.ndo_open               = ionic_open,
1845 	.ndo_stop               = ionic_stop,
1846 	.ndo_start_xmit		= ionic_start_xmit,
1847 	.ndo_get_stats64	= ionic_get_stats64,
1848 	.ndo_set_rx_mode	= ionic_set_rx_mode,
1849 	.ndo_set_features	= ionic_set_features,
1850 	.ndo_set_mac_address	= ionic_set_mac_address,
1851 	.ndo_validate_addr	= eth_validate_addr,
1852 	.ndo_tx_timeout         = ionic_tx_timeout,
1853 	.ndo_change_mtu         = ionic_change_mtu,
1854 	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
1855 	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
1856 	.ndo_set_vf_vlan	= ionic_set_vf_vlan,
1857 	.ndo_set_vf_trust	= ionic_set_vf_trust,
1858 	.ndo_set_vf_mac		= ionic_set_vf_mac,
1859 	.ndo_set_vf_rate	= ionic_set_vf_rate,
1860 	.ndo_set_vf_spoofchk	= ionic_set_vf_spoofchk,
1861 	.ndo_get_vf_config	= ionic_get_vf_config,
1862 	.ndo_set_vf_link_state	= ionic_set_vf_link_state,
1863 	.ndo_get_vf_stats       = ionic_get_vf_stats,
1864 };
1865 
1866 int ionic_reset_queues(struct ionic_lif *lif)
1867 {
1868 	bool running;
1869 	int err = 0;
1870 
1871 	/* Put off the next watchdog timeout */
1872 	netif_trans_update(lif->netdev);
1873 
1874 	err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
1875 	if (err)
1876 		return err;
1877 
1878 	running = netif_running(lif->netdev);
1879 	if (running)
1880 		err = ionic_stop(lif->netdev);
1881 	if (!err && running)
1882 		ionic_open(lif->netdev);
1883 
1884 	clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);
1885 
1886 	return err;
1887 }
1888 
1889 static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
1890 {
1891 	struct device *dev = ionic->dev;
1892 	struct net_device *netdev;
1893 	struct ionic_lif *lif;
1894 	int tbl_sz;
1895 	int err;
1896 
1897 	netdev = alloc_etherdev_mqs(sizeof(*lif),
1898 				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
1899 	if (!netdev) {
1900 		dev_err(dev, "Cannot allocate netdev, aborting\n");
1901 		return ERR_PTR(-ENOMEM);
1902 	}
1903 
1904 	SET_NETDEV_DEV(netdev, dev);
1905 
1906 	lif = netdev_priv(netdev);
1907 	lif->netdev = netdev;
1908 	ionic->master_lif = lif;
1909 	netdev->netdev_ops = &ionic_netdev_ops;
1910 	ionic_ethtool_set_ops(netdev);
1911 
1912 	netdev->watchdog_timeo = 2 * HZ;
1913 	netdev->min_mtu = IONIC_MIN_MTU;
1914 	netdev->max_mtu = IONIC_MAX_MTU;
1915 
1916 	lif->neqs = ionic->neqs_per_lif;
1917 	lif->nxqs = ionic->ntxqs_per_lif;
1918 
1919 	lif->ionic = ionic;
1920 	lif->index = index;
1921 	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
1922 	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
1923 
1924 	/* Convert the default coalesce value to actual hw resolution */
1925 	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
1926 	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
1927 						    lif->rx_coalesce_usecs);
1928 
1929 	snprintf(lif->name, sizeof(lif->name), "lif%u", index);
1930 
1931 	spin_lock_init(&lif->adminq_lock);
1932 
1933 	spin_lock_init(&lif->deferred.lock);
1934 	INIT_LIST_HEAD(&lif->deferred.list);
1935 	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
1936 
1937 	/* allocate lif info */
1938 	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
1939 	lif->info = dma_alloc_coherent(dev, lif->info_sz,
1940 				       &lif->info_pa, GFP_KERNEL);
1941 	if (!lif->info) {
1942 		dev_err(dev, "Failed to allocate lif info, aborting\n");
1943 		err = -ENOMEM;
1944 		goto err_out_free_netdev;
1945 	}
1946 
1947 	/* allocate queues */
1948 	err = ionic_qcqs_alloc(lif);
1949 	if (err)
1950 		goto err_out_free_lif_info;
1951 
1952 	/* allocate rss indirection table */
1953 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1954 	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
1955 	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
1956 					      &lif->rss_ind_tbl_pa,
1957 					      GFP_KERNEL);
1958 
1959 	if (!lif->rss_ind_tbl) {
1960 		err = -ENOMEM;
1961 		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
1962 		goto err_out_free_qcqs;
1963 	}
1964 	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
1965 
1966 	list_add_tail(&lif->list, &ionic->lifs);
1967 
1968 	return lif;
1969 
1970 err_out_free_qcqs:
1971 	ionic_qcqs_free(lif);
1972 err_out_free_lif_info:
1973 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
1974 	lif->info = NULL;
1975 	lif->info_pa = 0;
1976 err_out_free_netdev:
1977 	free_netdev(lif->netdev);
1978 	lif = NULL;
1979 
1980 	return ERR_PTR(err);
1981 }
1982 
1983 int ionic_lifs_alloc(struct ionic *ionic)
1984 {
1985 	struct ionic_lif *lif;
1986 
1987 	INIT_LIST_HEAD(&ionic->lifs);
1988 
1989 	/* only build the first lif, others are for later features */
1990 	set_bit(0, ionic->lifbits);
1991 	lif = ionic_lif_alloc(ionic, 0);
1992 
1993 	return PTR_ERR_OR_ZERO(lif);
1994 }
1995 
1996 static void ionic_lif_reset(struct ionic_lif *lif)
1997 {
1998 	struct ionic_dev *idev = &lif->ionic->idev;
1999 
2000 	mutex_lock(&lif->ionic->dev_cmd_lock);
2001 	ionic_dev_cmd_lif_reset(idev, lif->index);
2002 	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2003 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2004 }
2005 
2006 static void ionic_lif_free(struct ionic_lif *lif)
2007 {
2008 	struct device *dev = lif->ionic->dev;
2009 
2010 	/* free rss indirection table */
2011 	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
2012 			  lif->rss_ind_tbl_pa);
2013 	lif->rss_ind_tbl = NULL;
2014 	lif->rss_ind_tbl_pa = 0;
2015 
2016 	/* free queues */
2017 	ionic_qcqs_free(lif);
2018 	ionic_lif_reset(lif);
2019 
2020 	/* free lif info */
2021 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2022 	lif->info = NULL;
2023 	lif->info_pa = 0;
2024 
2025 	/* unmap doorbell page */
2026 	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2027 	lif->kern_dbpage = NULL;
2028 	kfree(lif->dbid_inuse);
2029 	lif->dbid_inuse = NULL;
2030 
2031 	/* free netdev & lif */
2032 	ionic_debugfs_del_lif(lif);
2033 	list_del(&lif->list);
2034 	free_netdev(lif->netdev);
2035 }
2036 
2037 void ionic_lifs_free(struct ionic *ionic)
2038 {
2039 	struct list_head *cur, *tmp;
2040 	struct ionic_lif *lif;
2041 
2042 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2043 		lif = list_entry(cur, struct ionic_lif, list);
2044 
2045 		ionic_lif_free(lif);
2046 	}
2047 }
2048 
2049 static void ionic_lif_deinit(struct ionic_lif *lif)
2050 {
2051 	if (!test_bit(IONIC_LIF_INITED, lif->state))
2052 		return;
2053 
2054 	clear_bit(IONIC_LIF_INITED, lif->state);
2055 
2056 	ionic_rx_filters_deinit(lif);
2057 	ionic_lif_rss_deinit(lif);
2058 
2059 	napi_disable(&lif->adminqcq->napi);
2060 	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2061 	ionic_lif_qcq_deinit(lif, lif->adminqcq);
2062 
2063 	ionic_lif_reset(lif);
2064 }
2065 
2066 void ionic_lifs_deinit(struct ionic *ionic)
2067 {
2068 	struct list_head *cur, *tmp;
2069 	struct ionic_lif *lif;
2070 
2071 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2072 		lif = list_entry(cur, struct ionic_lif, list);
2073 		ionic_lif_deinit(lif);
2074 	}
2075 }
2076 
2077 static int ionic_lif_adminq_init(struct ionic_lif *lif)
2078 {
2079 	struct device *dev = lif->ionic->dev;
2080 	struct ionic_q_init_comp comp;
2081 	struct ionic_dev *idev;
2082 	struct ionic_qcq *qcq;
2083 	struct ionic_queue *q;
2084 	int err;
2085 
2086 	idev = &lif->ionic->idev;
2087 	qcq = lif->adminqcq;
2088 	q = &qcq->q;
2089 
2090 	mutex_lock(&lif->ionic->dev_cmd_lock);
2091 	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
2092 	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2093 	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2094 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2095 	if (err) {
2096 		netdev_err(lif->netdev, "adminq init failed %d\n", err);
2097 		return err;
2098 	}
2099 
2100 	q->hw_type = comp.hw_type;
2101 	q->hw_index = le32_to_cpu(comp.hw_index);
2102 	q->dbval = IONIC_DBELL_QID(q->hw_index);
2103 
2104 	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
2105 	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
2106 
2107 	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
2108 		       NAPI_POLL_WEIGHT);
2109 
2110 	err = ionic_request_irq(lif, qcq);
2111 	if (err) {
2112 		netdev_warn(lif->netdev, "adminq irq request failed %d\n", err);
2113 		netif_napi_del(&qcq->napi);
2114 		return err;
2115 	}
2116 
2117 	napi_enable(&qcq->napi);
2118 
2119 	if (qcq->flags & IONIC_QCQ_F_INTR)
2120 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
2121 				IONIC_INTR_MASK_CLEAR);
2122 
2123 	qcq->flags |= IONIC_QCQ_F_INITED;
2124 
2125 	ionic_debugfs_add_qcq(lif, qcq);
2126 
2127 	return 0;
2128 }
2129 
2130 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
2131 {
2132 	struct ionic_qcq *qcq = lif->notifyqcq;
2133 	struct device *dev = lif->ionic->dev;
2134 	struct ionic_queue *q = &qcq->q;
2135 	int err;
2136 
2137 	struct ionic_admin_ctx ctx = {
2138 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2139 		.cmd.q_init = {
2140 			.opcode = IONIC_CMD_Q_INIT,
2141 			.lif_index = cpu_to_le16(lif->index),
2142 			.type = q->type,
2143 			.index = cpu_to_le32(q->index),
2144 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
2145 					     IONIC_QINIT_F_ENA),
2146 			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
2147 			.pid = cpu_to_le16(q->pid),
2148 			.ring_size = ilog2(q->num_descs),
2149 			.ring_base = cpu_to_le64(q->base_pa),
2150 		}
2151 	};
2152 
2153 	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
2154 	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
2155 	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
2156 	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
2157 
2158 	err = ionic_adminq_post_wait(lif, &ctx);
2159 	if (err)
2160 		return err;
2161 
2162 	q->hw_type = ctx.comp.q_init.hw_type;
2163 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
2164 	q->dbval = IONIC_DBELL_QID(q->hw_index);
2165 
2166 	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
2167 	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
2168 
2169 	/* preset the callback info */
2170 	q->info[0].cb_arg = lif;
2171 
2172 	qcq->flags |= IONIC_QCQ_F_INITED;
2173 
2174 	ionic_debugfs_add_qcq(lif, qcq);
2175 
2176 	return 0;
2177 }
2178 
2179 static int ionic_station_set(struct ionic_lif *lif)
2180 {
2181 	struct net_device *netdev = lif->netdev;
2182 	struct ionic_admin_ctx ctx = {
2183 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2184 		.cmd.lif_getattr = {
2185 			.opcode = IONIC_CMD_LIF_GETATTR,
2186 			.index = cpu_to_le16(lif->index),
2187 			.attr = IONIC_LIF_ATTR_MAC,
2188 		},
2189 	};
2190 	struct sockaddr addr;
2191 	int err;
2192 
2193 	err = ionic_adminq_post_wait(lif, &ctx);
2194 	if (err)
2195 		return err;
2196 
2197 	if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
2198 		return 0;
2199 
2200 	memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
2201 	addr.sa_family = AF_INET;
2202 	err = eth_prepare_mac_addr_change(netdev, &addr);
2203 	if (err) {
2204 		netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM\n",
2205 			    addr.sa_data);
2206 		return 0;
2207 	}
2208 
2209 	netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
2210 		   netdev->dev_addr);
2211 	ionic_lif_addr(lif, netdev->dev_addr, false);
2212 
2213 	eth_commit_mac_addr_change(netdev, &addr);
2214 	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
2215 		   netdev->dev_addr);
2216 	ionic_lif_addr(lif, netdev->dev_addr, true);
2217 
2218 	return 0;
2219 }
2220 
2221 static int ionic_lif_init(struct ionic_lif *lif)
2222 {
2223 	struct ionic_dev *idev = &lif->ionic->idev;
2224 	struct device *dev = lif->ionic->dev;
2225 	struct ionic_lif_init_comp comp;
2226 	int dbpage_num;
2227 	int err;
2228 
2229 	ionic_debugfs_add_lif(lif);
2230 
2231 	mutex_lock(&lif->ionic->dev_cmd_lock);
2232 	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
2233 	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2234 	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2235 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2236 	if (err)
2237 		return err;
2238 
2239 	lif->hw_index = le16_to_cpu(comp.hw_index);
2240 
2241 	/* now that we have the hw_index we can figure out our doorbell page */
2242 	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
2243 	if (!lif->dbid_count) {
2244 		dev_err(dev, "No doorbell pages, aborting\n");
2245 		return -EINVAL;
2246 	}
2247 
2248 	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
2249 	if (!lif->dbid_inuse) {
2250 		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
2251 		return -ENOMEM;
2252 	}
2253 
2254 	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
2255 	set_bit(0, lif->dbid_inuse);
2256 	lif->kern_pid = 0;
2257 
2258 	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
2259 	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
2260 	if (!lif->kern_dbpage) {
2261 		dev_err(dev, "Cannot map dbpage, aborting\n");
2262 		err = -ENOMEM;
2263 		goto err_out_free_dbid;
2264 	}
2265 
2266 	err = ionic_lif_adminq_init(lif);
2267 	if (err)
2268 		goto err_out_adminq_deinit;
2269 
2270 	if (lif->ionic->nnqs_per_lif) {
2271 		err = ionic_lif_notifyq_init(lif);
2272 		if (err)
2273 			goto err_out_notifyq_deinit;
2274 	}
2275 
2276 	err = ionic_init_nic_features(lif);
2277 	if (err)
2278 		goto err_out_notifyq_deinit;
2279 
2280 	err = ionic_rx_filters_init(lif);
2281 	if (err)
2282 		goto err_out_notifyq_deinit;
2283 
2284 	err = ionic_station_set(lif);
2285 	if (err)
2286 		goto err_out_notifyq_deinit;
2287 
2288 	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
2289 
2290 	set_bit(IONIC_LIF_INITED, lif->state);
2291 
2292 	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
2293 
2294 	return 0;
2295 
2296 err_out_notifyq_deinit:
2297 	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2298 err_out_adminq_deinit:
2299 	ionic_lif_qcq_deinit(lif, lif->adminqcq);
2300 	ionic_lif_reset(lif);
2301 	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2302 	lif->kern_dbpage = NULL;
2303 err_out_free_dbid:
2304 	kfree(lif->dbid_inuse);
2305 	lif->dbid_inuse = NULL;
2306 
2307 	return err;
2308 }
2309 
2310 int ionic_lifs_init(struct ionic *ionic)
2311 {
2312 	struct list_head *cur, *tmp;
2313 	struct ionic_lif *lif;
2314 	int err;
2315 
2316 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2317 		lif = list_entry(cur, struct ionic_lif, list);
2318 		err = ionic_lif_init(lif);
2319 		if (err)
2320 			return err;
2321 	}
2322 
2323 	return 0;
2324 }
2325 
2326 static void ionic_lif_notify_work(struct work_struct *ws)
2327 {
2328 }
2329 
2330 static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
2331 {
2332 	struct ionic_admin_ctx ctx = {
2333 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2334 		.cmd.lif_setattr = {
2335 			.opcode = IONIC_CMD_LIF_SETATTR,
2336 			.index = cpu_to_le16(lif->index),
2337 			.attr = IONIC_LIF_ATTR_NAME,
2338 		},
2339 	};
2340 
2341 	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
2342 		sizeof(ctx.cmd.lif_setattr.name));
2343 
2344 	ionic_adminq_post_wait(lif, &ctx);
2345 }
2346 
2347 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
2348 {
2349 	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
2350 		return NULL;
2351 
2352 	return netdev_priv(netdev);
2353 }
2354 
2355 static int ionic_lif_notify(struct notifier_block *nb,
2356 			    unsigned long event, void *info)
2357 {
2358 	struct net_device *ndev = netdev_notifier_info_to_dev(info);
2359 	struct ionic *ionic = container_of(nb, struct ionic, nb);
2360 	struct ionic_lif *lif = ionic_netdev_lif(ndev);
2361 
2362 	if (!lif || lif->ionic != ionic)
2363 		return NOTIFY_DONE;
2364 
2365 	switch (event) {
2366 	case NETDEV_CHANGENAME:
2367 		ionic_lif_set_netdev_info(lif);
2368 		break;
2369 	}
2370 
2371 	return NOTIFY_DONE;
2372 }
2373 
2374 int ionic_lifs_register(struct ionic *ionic)
2375 {
2376 	int err;
2377 
2378 	INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
2379 
2380 	ionic->nb.notifier_call = ionic_lif_notify;
2381 
2382 	err = register_netdevice_notifier(&ionic->nb);
2383 	if (err)
2384 		ionic->nb.notifier_call = NULL;
2385 
2386 	/* only register LIF0 for now */
2387 	err = register_netdev(ionic->master_lif->netdev);
2388 	if (err) {
2389 		dev_err(ionic->dev, "Cannot register net device, aborting\n");
2390 		return err;
2391 	}
2392 
2393 	ionic_link_status_check_request(ionic->master_lif);
2394 	ionic->master_lif->registered = true;
2395 
2396 	return 0;
2397 }
2398 
2399 void ionic_lifs_unregister(struct ionic *ionic)
2400 {
2401 	if (ionic->nb.notifier_call) {
2402 		unregister_netdevice_notifier(&ionic->nb);
2403 		cancel_work_sync(&ionic->nb_work);
2404 		ionic->nb.notifier_call = NULL;
2405 	}
2406 
2407 	/* There is only one lif ever registered in the
2408 	 * current model, so don't bother searching the
2409 	 * ionic->lif for candidates to unregister
2410 	 */
2411 	cancel_work_sync(&ionic->master_lif->deferred.work);
2412 	cancel_work_sync(&ionic->master_lif->tx_timeout_work);
2413 	if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
2414 		unregister_netdev(ionic->master_lif->netdev);
2415 }
2416 
2417 int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
2418 		       union ionic_lif_identity *lid)
2419 {
2420 	struct ionic_dev *idev = &ionic->idev;
2421 	size_t sz;
2422 	int err;
2423 
2424 	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
2425 
2426 	mutex_lock(&ionic->dev_cmd_lock);
2427 	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
2428 	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
2429 	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
2430 	mutex_unlock(&ionic->dev_cmd_lock);
2431 	if (err)
2432 		return (err);
2433 
2434 	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
2435 		le64_to_cpu(lid->capabilities));
2436 
2437 	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
2438 		le32_to_cpu(lid->eth.max_ucast_filters));
2439 	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
2440 		le32_to_cpu(lid->eth.max_mcast_filters));
2441 	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
2442 		le64_to_cpu(lid->eth.config.features));
2443 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
2444 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
2445 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
2446 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
2447 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
2448 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
2449 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
2450 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
2451 	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
2452 	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
2453 	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
2454 		le32_to_cpu(lid->eth.config.mtu));
2455 
2456 	return 0;
2457 }
2458 
2459 int ionic_lifs_size(struct ionic *ionic)
2460 {
2461 	struct ionic_identity *ident = &ionic->ident;
2462 	unsigned int nintrs, dev_nintrs;
2463 	union ionic_lif_config *lc;
2464 	unsigned int ntxqs_per_lif;
2465 	unsigned int nrxqs_per_lif;
2466 	unsigned int neqs_per_lif;
2467 	unsigned int nnqs_per_lif;
2468 	unsigned int nxqs, neqs;
2469 	unsigned int min_intrs;
2470 	int err;
2471 
2472 	lc = &ident->lif.eth.config;
2473 	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
2474 	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
2475 	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
2476 	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
2477 	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
2478 
2479 	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
2480 	nxqs = min(nxqs, num_online_cpus());
2481 	neqs = min(neqs_per_lif, num_online_cpus());
2482 
2483 try_again:
2484 	/* interrupt usage:
2485 	 *    1 for master lif adminq/notifyq
2486 	 *    1 for each CPU for master lif TxRx queue pairs
2487 	 *    whatever's left is for RDMA queues
2488 	 */
2489 	nintrs = 1 + nxqs + neqs;
2490 	min_intrs = 2;  /* adminq + 1 TxRx queue pair */
2491 
2492 	if (nintrs > dev_nintrs)
2493 		goto try_fewer;
2494 
2495 	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
2496 	if (err < 0 && err != -ENOSPC) {
2497 		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
2498 		return err;
2499 	}
2500 	if (err == -ENOSPC)
2501 		goto try_fewer;
2502 
2503 	if (err != nintrs) {
2504 		ionic_bus_free_irq_vectors(ionic);
2505 		goto try_fewer;
2506 	}
2507 
2508 	ionic->nnqs_per_lif = nnqs_per_lif;
2509 	ionic->neqs_per_lif = neqs;
2510 	ionic->ntxqs_per_lif = nxqs;
2511 	ionic->nrxqs_per_lif = nxqs;
2512 	ionic->nintrs = nintrs;
2513 
2514 	ionic_debugfs_add_sizes(ionic);
2515 
2516 	return 0;
2517 
2518 try_fewer:
2519 	if (nnqs_per_lif > 1) {
2520 		nnqs_per_lif >>= 1;
2521 		goto try_again;
2522 	}
2523 	if (neqs > 1) {
2524 		neqs >>= 1;
2525 		goto try_again;
2526 	}
2527 	if (nxqs > 1) {
2528 		nxqs >>= 1;
2529 		goto try_again;
2530 	}
2531 	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
2532 	return -ENOSPC;
2533 }
2534