1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3 
4 #include <linux/printk.h>
5 #include <linux/dynamic_debug.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/rtnetlink.h>
9 #include <linux/interrupt.h>
10 #include <linux/pci.h>
11 #include <linux/cpumask.h>
12 
13 #include "ionic.h"
14 #include "ionic_bus.h"
15 #include "ionic_lif.h"
16 #include "ionic_txrx.h"
17 #include "ionic_ethtool.h"
18 #include "ionic_debugfs.h"
19 
20 /* queuetype support level */
21 static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
22 	[IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
23 	[IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
24 	[IONIC_QTYPE_RXQ]     = 0,   /* 0 = Base version with CQ+SG support */
25 	[IONIC_QTYPE_TXQ]     = 1,   /* 0 = Base version with CQ+SG support
26 				      * 1 =   ... with Tx SG version 1
27 				      */
28 };
29 
30 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
31 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
32 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
33 static void ionic_link_status_check(struct ionic_lif *lif);
34 static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
35 static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
36 static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
37 
38 static int ionic_start_queues(struct ionic_lif *lif);
39 static void ionic_stop_queues(struct ionic_lif *lif);
40 static void ionic_lif_queue_identify(struct ionic_lif *lif);
41 
42 static void ionic_lif_deferred_work(struct work_struct *work)
43 {
44 	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
45 	struct ionic_deferred *def = &lif->deferred;
46 	struct ionic_deferred_work *w = NULL;
47 
48 	spin_lock_bh(&def->lock);
49 	if (!list_empty(&def->list)) {
50 		w = list_first_entry(&def->list,
51 				     struct ionic_deferred_work, list);
52 		list_del(&w->list);
53 	}
54 	spin_unlock_bh(&def->lock);
55 
56 	if (w) {
57 		switch (w->type) {
58 		case IONIC_DW_TYPE_RX_MODE:
59 			ionic_lif_rx_mode(lif, w->rx_mode);
60 			break;
61 		case IONIC_DW_TYPE_RX_ADDR_ADD:
62 			ionic_lif_addr_add(lif, w->addr);
63 			break;
64 		case IONIC_DW_TYPE_RX_ADDR_DEL:
65 			ionic_lif_addr_del(lif, w->addr);
66 			break;
67 		case IONIC_DW_TYPE_LINK_STATUS:
68 			ionic_link_status_check(lif);
69 			break;
70 		case IONIC_DW_TYPE_LIF_RESET:
71 			if (w->fw_status)
72 				ionic_lif_handle_fw_up(lif);
73 			else
74 				ionic_lif_handle_fw_down(lif);
75 			break;
76 		default:
77 			break;
78 		}
79 		kfree(w);
80 		schedule_work(&def->work);
81 	}
82 }
83 
84 void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
85 				struct ionic_deferred_work *work)
86 {
87 	spin_lock_bh(&def->lock);
88 	list_add_tail(&work->list, &def->list);
89 	spin_unlock_bh(&def->lock);
90 	schedule_work(&def->work);
91 }
92 
93 static void ionic_link_status_check(struct ionic_lif *lif)
94 {
95 	struct net_device *netdev = lif->netdev;
96 	u16 link_status;
97 	bool link_up;
98 
99 	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
100 		return;
101 
102 	link_status = le16_to_cpu(lif->info->status.link_status);
103 	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
104 
105 	if (link_up) {
106 		if (!netif_carrier_ok(netdev)) {
107 			u32 link_speed;
108 
109 			ionic_port_identify(lif->ionic);
110 			link_speed = le32_to_cpu(lif->info->status.link_speed);
111 			netdev_info(netdev, "Link up - %d Gbps\n",
112 				    link_speed / 1000);
113 			netif_carrier_on(netdev);
114 		}
115 
116 		if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev))
117 			ionic_start_queues(lif);
118 	} else {
119 		if (netif_carrier_ok(netdev)) {
120 			netdev_info(netdev, "Link down\n");
121 			netif_carrier_off(netdev);
122 		}
123 
124 		if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev))
125 			ionic_stop_queues(lif);
126 	}
127 
128 	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
129 }
130 
131 void ionic_link_status_check_request(struct ionic_lif *lif)
132 {
133 	struct ionic_deferred_work *work;
134 
135 	/* we only need one request outstanding at a time */
136 	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
137 		return;
138 
139 	if (in_interrupt()) {
140 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
141 		if (!work)
142 			return;
143 
144 		work->type = IONIC_DW_TYPE_LINK_STATUS;
145 		ionic_lif_deferred_enqueue(&lif->deferred, work);
146 	} else {
147 		ionic_link_status_check(lif);
148 	}
149 }
150 
151 static irqreturn_t ionic_isr(int irq, void *data)
152 {
153 	struct napi_struct *napi = data;
154 
155 	napi_schedule_irqoff(napi);
156 
157 	return IRQ_HANDLED;
158 }
159 
160 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
161 {
162 	struct ionic_intr_info *intr = &qcq->intr;
163 	struct device *dev = lif->ionic->dev;
164 	struct ionic_queue *q = &qcq->q;
165 	const char *name;
166 
167 	if (lif->registered)
168 		name = lif->netdev->name;
169 	else
170 		name = dev_name(dev);
171 
172 	snprintf(intr->name, sizeof(intr->name),
173 		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
174 
175 	return devm_request_irq(dev, intr->vector, ionic_isr,
176 				0, intr->name, &qcq->napi);
177 }
178 
179 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
180 {
181 	struct ionic *ionic = lif->ionic;
182 	int index;
183 
184 	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
185 	if (index == ionic->nintrs) {
186 		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
187 			    __func__, index, ionic->nintrs);
188 		return -ENOSPC;
189 	}
190 
191 	set_bit(index, ionic->intrs);
192 	ionic_intr_init(&ionic->idev, intr, index);
193 
194 	return 0;
195 }
196 
197 static void ionic_intr_free(struct ionic *ionic, int index)
198 {
199 	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
200 		clear_bit(index, ionic->intrs);
201 }
202 
203 static int ionic_qcq_enable(struct ionic_qcq *qcq)
204 {
205 	struct ionic_queue *q = &qcq->q;
206 	struct ionic_lif *lif = q->lif;
207 	struct ionic_dev *idev;
208 	struct device *dev;
209 
210 	struct ionic_admin_ctx ctx = {
211 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
212 		.cmd.q_control = {
213 			.opcode = IONIC_CMD_Q_CONTROL,
214 			.lif_index = cpu_to_le16(lif->index),
215 			.type = q->type,
216 			.index = cpu_to_le32(q->index),
217 			.oper = IONIC_Q_ENABLE,
218 		},
219 	};
220 
221 	idev = &lif->ionic->idev;
222 	dev = lif->ionic->dev;
223 
224 	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
225 		ctx.cmd.q_control.index, ctx.cmd.q_control.type);
226 
227 	if (qcq->flags & IONIC_QCQ_F_INTR) {
228 		irq_set_affinity_hint(qcq->intr.vector,
229 				      &qcq->intr.affinity_mask);
230 		napi_enable(&qcq->napi);
231 		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
232 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
233 				IONIC_INTR_MASK_CLEAR);
234 	}
235 
236 	return ionic_adminq_post_wait(lif, &ctx);
237 }
238 
239 static int ionic_qcq_disable(struct ionic_qcq *qcq)
240 {
241 	struct ionic_queue *q = &qcq->q;
242 	struct ionic_lif *lif = q->lif;
243 	struct ionic_dev *idev;
244 	struct device *dev;
245 
246 	struct ionic_admin_ctx ctx = {
247 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
248 		.cmd.q_control = {
249 			.opcode = IONIC_CMD_Q_CONTROL,
250 			.lif_index = cpu_to_le16(lif->index),
251 			.type = q->type,
252 			.index = cpu_to_le32(q->index),
253 			.oper = IONIC_Q_DISABLE,
254 		},
255 	};
256 
257 	idev = &lif->ionic->idev;
258 	dev = lif->ionic->dev;
259 
260 	dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
261 		ctx.cmd.q_control.index, ctx.cmd.q_control.type);
262 
263 	if (qcq->flags & IONIC_QCQ_F_INTR) {
264 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
265 				IONIC_INTR_MASK_SET);
266 		synchronize_irq(qcq->intr.vector);
267 		irq_set_affinity_hint(qcq->intr.vector, NULL);
268 		napi_disable(&qcq->napi);
269 	}
270 
271 	return ionic_adminq_post_wait(lif, &ctx);
272 }
273 
274 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
275 {
276 	struct ionic_dev *idev = &lif->ionic->idev;
277 
278 	if (!qcq)
279 		return;
280 
281 	if (!(qcq->flags & IONIC_QCQ_F_INITED))
282 		return;
283 
284 	if (qcq->flags & IONIC_QCQ_F_INTR) {
285 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
286 				IONIC_INTR_MASK_SET);
287 		netif_napi_del(&qcq->napi);
288 	}
289 
290 	qcq->flags &= ~IONIC_QCQ_F_INITED;
291 }
292 
293 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
294 {
295 	struct device *dev = lif->ionic->dev;
296 
297 	if (!qcq)
298 		return;
299 
300 	ionic_debugfs_del_qcq(qcq);
301 
302 	dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
303 	qcq->base = NULL;
304 	qcq->base_pa = 0;
305 
306 	if (qcq->flags & IONIC_QCQ_F_INTR) {
307 		irq_set_affinity_hint(qcq->intr.vector, NULL);
308 		devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
309 		qcq->intr.vector = 0;
310 		ionic_intr_free(lif->ionic, qcq->intr.index);
311 	}
312 
313 	devm_kfree(dev, qcq->cq.info);
314 	qcq->cq.info = NULL;
315 	devm_kfree(dev, qcq->q.info);
316 	qcq->q.info = NULL;
317 	devm_kfree(dev, qcq);
318 }
319 
320 static void ionic_qcqs_free(struct ionic_lif *lif)
321 {
322 	struct device *dev = lif->ionic->dev;
323 	unsigned int i;
324 
325 	if (lif->notifyqcq) {
326 		ionic_qcq_free(lif, lif->notifyqcq);
327 		lif->notifyqcq = NULL;
328 	}
329 
330 	if (lif->adminqcq) {
331 		ionic_qcq_free(lif, lif->adminqcq);
332 		lif->adminqcq = NULL;
333 	}
334 
335 	if (lif->rxqcqs) {
336 		for (i = 0; i < lif->nxqs; i++)
337 			if (lif->rxqcqs[i].stats)
338 				devm_kfree(dev, lif->rxqcqs[i].stats);
339 		devm_kfree(dev, lif->rxqcqs);
340 		lif->rxqcqs = NULL;
341 	}
342 
343 	if (lif->txqcqs) {
344 		for (i = 0; i < lif->nxqs; i++)
345 			if (lif->txqcqs[i].stats)
346 				devm_kfree(dev, lif->txqcqs[i].stats);
347 		devm_kfree(dev, lif->txqcqs);
348 		lif->txqcqs = NULL;
349 	}
350 }
351 
352 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
353 				      struct ionic_qcq *n_qcq)
354 {
355 	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
356 		ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
357 		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
358 	}
359 
360 	n_qcq->intr.vector = src_qcq->intr.vector;
361 	n_qcq->intr.index = src_qcq->intr.index;
362 }
363 
364 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
365 			   unsigned int index,
366 			   const char *name, unsigned int flags,
367 			   unsigned int num_descs, unsigned int desc_size,
368 			   unsigned int cq_desc_size,
369 			   unsigned int sg_desc_size,
370 			   unsigned int pid, struct ionic_qcq **qcq)
371 {
372 	struct ionic_dev *idev = &lif->ionic->idev;
373 	u32 q_size, cq_size, sg_size, total_size;
374 	struct device *dev = lif->ionic->dev;
375 	void *q_base, *cq_base, *sg_base;
376 	dma_addr_t cq_base_pa = 0;
377 	dma_addr_t sg_base_pa = 0;
378 	dma_addr_t q_base_pa = 0;
379 	struct ionic_qcq *new;
380 	int err;
381 
382 	*qcq = NULL;
383 
384 	q_size  = num_descs * desc_size;
385 	cq_size = num_descs * cq_desc_size;
386 	sg_size = num_descs * sg_desc_size;
387 
388 	total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
389 	/* Note: aligning q_size/cq_size is not enough due to cq_base
390 	 * address aligning as q_base could be not aligned to the page.
391 	 * Adding PAGE_SIZE.
392 	 */
393 	total_size += PAGE_SIZE;
394 	if (flags & IONIC_QCQ_F_SG) {
395 		total_size += ALIGN(sg_size, PAGE_SIZE);
396 		total_size += PAGE_SIZE;
397 	}
398 
399 	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
400 	if (!new) {
401 		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
402 		err = -ENOMEM;
403 		goto err_out;
404 	}
405 
406 	new->flags = flags;
407 
408 	new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
409 				   GFP_KERNEL);
410 	if (!new->q.info) {
411 		netdev_err(lif->netdev, "Cannot allocate queue info\n");
412 		err = -ENOMEM;
413 		goto err_out;
414 	}
415 
416 	new->q.type = type;
417 
418 	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
419 			   desc_size, sg_desc_size, pid);
420 	if (err) {
421 		netdev_err(lif->netdev, "Cannot initialize queue\n");
422 		goto err_out;
423 	}
424 
425 	if (flags & IONIC_QCQ_F_INTR) {
426 		err = ionic_intr_alloc(lif, &new->intr);
427 		if (err) {
428 			netdev_warn(lif->netdev, "no intr for %s: %d\n",
429 				    name, err);
430 			goto err_out;
431 		}
432 
433 		err = ionic_bus_get_irq(lif->ionic, new->intr.index);
434 		if (err < 0) {
435 			netdev_warn(lif->netdev, "no vector for %s: %d\n",
436 				    name, err);
437 			goto err_out_free_intr;
438 		}
439 		new->intr.vector = err;
440 		ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
441 				       IONIC_INTR_MASK_SET);
442 
443 		err = ionic_request_irq(lif, new);
444 		if (err) {
445 			netdev_warn(lif->netdev, "irq request failed %d\n", err);
446 			goto err_out_free_intr;
447 		}
448 
449 		new->intr.cpu = cpumask_local_spread(new->intr.index,
450 						     dev_to_node(dev));
451 		if (new->intr.cpu != -1)
452 			cpumask_set_cpu(new->intr.cpu,
453 					&new->intr.affinity_mask);
454 	} else {
455 		new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
456 	}
457 
458 	new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
459 				    GFP_KERNEL);
460 	if (!new->cq.info) {
461 		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
462 		err = -ENOMEM;
463 		goto err_out_free_irq;
464 	}
465 
466 	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
467 	if (err) {
468 		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
469 		goto err_out_free_irq;
470 	}
471 
472 	new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
473 				       GFP_KERNEL);
474 	if (!new->base) {
475 		netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
476 		err = -ENOMEM;
477 		goto err_out_free_irq;
478 	}
479 
480 	new->total_size = total_size;
481 
482 	q_base = new->base;
483 	q_base_pa = new->base_pa;
484 
485 	cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
486 	cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);
487 
488 	if (flags & IONIC_QCQ_F_SG) {
489 		sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
490 					PAGE_SIZE);
491 		sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
492 		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
493 	}
494 
495 	ionic_q_map(&new->q, q_base, q_base_pa);
496 	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
497 	ionic_cq_bind(&new->cq, &new->q);
498 
499 	*qcq = new;
500 
501 	return 0;
502 
503 err_out_free_irq:
504 	if (flags & IONIC_QCQ_F_INTR)
505 		devm_free_irq(dev, new->intr.vector, &new->napi);
506 err_out_free_intr:
507 	if (flags & IONIC_QCQ_F_INTR)
508 		ionic_intr_free(lif->ionic, new->intr.index);
509 err_out:
510 	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
511 	return err;
512 }
513 
514 static int ionic_qcqs_alloc(struct ionic_lif *lif)
515 {
516 	struct device *dev = lif->ionic->dev;
517 	unsigned int q_list_size;
518 	unsigned int flags;
519 	int err;
520 	int i;
521 
522 	flags = IONIC_QCQ_F_INTR;
523 	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
524 			      IONIC_ADMINQ_LENGTH,
525 			      sizeof(struct ionic_admin_cmd),
526 			      sizeof(struct ionic_admin_comp),
527 			      0, lif->kern_pid, &lif->adminqcq);
528 	if (err)
529 		return err;
530 	ionic_debugfs_add_qcq(lif, lif->adminqcq);
531 
532 	if (lif->ionic->nnqs_per_lif) {
533 		flags = IONIC_QCQ_F_NOTIFYQ;
534 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
535 				      flags, IONIC_NOTIFYQ_LENGTH,
536 				      sizeof(struct ionic_notifyq_cmd),
537 				      sizeof(union ionic_notifyq_comp),
538 				      0, lif->kern_pid, &lif->notifyqcq);
539 		if (err)
540 			goto err_out_free_adminqcq;
541 		ionic_debugfs_add_qcq(lif, lif->notifyqcq);
542 
543 		/* Let the notifyq ride on the adminq interrupt */
544 		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
545 	}
546 
547 	q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
548 	err = -ENOMEM;
549 	lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
550 	if (!lif->txqcqs)
551 		goto err_out_free_notifyqcq;
552 	for (i = 0; i < lif->nxqs; i++) {
553 		lif->txqcqs[i].stats = devm_kzalloc(dev,
554 						    sizeof(struct ionic_q_stats),
555 						    GFP_KERNEL);
556 		if (!lif->txqcqs[i].stats)
557 			goto err_out_free_tx_stats;
558 	}
559 
560 	lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
561 	if (!lif->rxqcqs)
562 		goto err_out_free_tx_stats;
563 	for (i = 0; i < lif->nxqs; i++) {
564 		lif->rxqcqs[i].stats = devm_kzalloc(dev,
565 						    sizeof(struct ionic_q_stats),
566 						    GFP_KERNEL);
567 		if (!lif->rxqcqs[i].stats)
568 			goto err_out_free_rx_stats;
569 	}
570 
571 	return 0;
572 
573 err_out_free_rx_stats:
574 	for (i = 0; i < lif->nxqs; i++)
575 		if (lif->rxqcqs[i].stats)
576 			devm_kfree(dev, lif->rxqcqs[i].stats);
577 	devm_kfree(dev, lif->rxqcqs);
578 	lif->rxqcqs = NULL;
579 err_out_free_tx_stats:
580 	for (i = 0; i < lif->nxqs; i++)
581 		if (lif->txqcqs[i].stats)
582 			devm_kfree(dev, lif->txqcqs[i].stats);
583 	devm_kfree(dev, lif->txqcqs);
584 	lif->txqcqs = NULL;
585 err_out_free_notifyqcq:
586 	if (lif->notifyqcq) {
587 		ionic_qcq_free(lif, lif->notifyqcq);
588 		lif->notifyqcq = NULL;
589 	}
590 err_out_free_adminqcq:
591 	ionic_qcq_free(lif, lif->adminqcq);
592 	lif->adminqcq = NULL;
593 
594 	return err;
595 }
596 
597 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
598 {
599 	struct device *dev = lif->ionic->dev;
600 	struct ionic_queue *q = &qcq->q;
601 	struct ionic_cq *cq = &qcq->cq;
602 	struct ionic_admin_ctx ctx = {
603 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
604 		.cmd.q_init = {
605 			.opcode = IONIC_CMD_Q_INIT,
606 			.lif_index = cpu_to_le16(lif->index),
607 			.type = q->type,
608 			.ver = lif->qtype_info[q->type].version,
609 			.index = cpu_to_le32(q->index),
610 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
611 					     IONIC_QINIT_F_SG),
612 			.intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
613 			.pid = cpu_to_le16(q->pid),
614 			.ring_size = ilog2(q->num_descs),
615 			.ring_base = cpu_to_le64(q->base_pa),
616 			.cq_ring_base = cpu_to_le64(cq->base_pa),
617 			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
618 		},
619 	};
620 	int err;
621 
622 	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
623 	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
624 	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
625 	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
626 	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
627 	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
628 
629 	q->tail = q->info;
630 	q->head = q->tail;
631 	cq->tail = cq->info;
632 
633 	err = ionic_adminq_post_wait(lif, &ctx);
634 	if (err)
635 		return err;
636 
637 	q->hw_type = ctx.comp.q_init.hw_type;
638 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
639 	q->dbval = IONIC_DBELL_QID(q->hw_index);
640 
641 	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
642 	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
643 
644 	qcq->flags |= IONIC_QCQ_F_INITED;
645 
646 	return 0;
647 }
648 
649 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
650 {
651 	struct device *dev = lif->ionic->dev;
652 	struct ionic_queue *q = &qcq->q;
653 	struct ionic_cq *cq = &qcq->cq;
654 	struct ionic_admin_ctx ctx = {
655 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
656 		.cmd.q_init = {
657 			.opcode = IONIC_CMD_Q_INIT,
658 			.lif_index = cpu_to_le16(lif->index),
659 			.type = q->type,
660 			.ver = lif->qtype_info[q->type].version,
661 			.index = cpu_to_le32(q->index),
662 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
663 					     IONIC_QINIT_F_SG),
664 			.intr_index = cpu_to_le16(cq->bound_intr->index),
665 			.pid = cpu_to_le16(q->pid),
666 			.ring_size = ilog2(q->num_descs),
667 			.ring_base = cpu_to_le64(q->base_pa),
668 			.cq_ring_base = cpu_to_le64(cq->base_pa),
669 			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
670 		},
671 	};
672 	int err;
673 
674 	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
675 	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
676 	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
677 	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
678 	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
679 	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
680 
681 	q->tail = q->info;
682 	q->head = q->tail;
683 	cq->tail = cq->info;
684 
685 	err = ionic_adminq_post_wait(lif, &ctx);
686 	if (err)
687 		return err;
688 
689 	q->hw_type = ctx.comp.q_init.hw_type;
690 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
691 	q->dbval = IONIC_DBELL_QID(q->hw_index);
692 
693 	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
694 	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
695 
696 	netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
697 		       NAPI_POLL_WEIGHT);
698 
699 	qcq->flags |= IONIC_QCQ_F_INITED;
700 
701 	return 0;
702 }
703 
704 static bool ionic_notifyq_service(struct ionic_cq *cq,
705 				  struct ionic_cq_info *cq_info)
706 {
707 	union ionic_notifyq_comp *comp = cq_info->cq_desc;
708 	struct ionic_deferred_work *work;
709 	struct net_device *netdev;
710 	struct ionic_queue *q;
711 	struct ionic_lif *lif;
712 	u64 eid;
713 
714 	q = cq->bound_q;
715 	lif = q->info[0].cb_arg;
716 	netdev = lif->netdev;
717 	eid = le64_to_cpu(comp->event.eid);
718 
719 	/* Have we run out of new completions to process? */
720 	if (eid <= lif->last_eid)
721 		return false;
722 
723 	lif->last_eid = eid;
724 
725 	dev_dbg(lif->ionic->dev, "notifyq event:\n");
726 	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
727 			 comp, sizeof(*comp), true);
728 
729 	switch (le16_to_cpu(comp->event.ecode)) {
730 	case IONIC_EVENT_LINK_CHANGE:
731 		ionic_link_status_check_request(lif);
732 		break;
733 	case IONIC_EVENT_RESET:
734 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
735 		if (!work) {
736 			netdev_err(lif->netdev, "%s OOM\n", __func__);
737 		} else {
738 			work->type = IONIC_DW_TYPE_LIF_RESET;
739 			ionic_lif_deferred_enqueue(&lif->deferred, work);
740 		}
741 		break;
742 	default:
743 		netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
744 			    comp->event.ecode, eid);
745 		break;
746 	}
747 
748 	return true;
749 }
750 
751 static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
752 {
753 	struct ionic_dev *idev = &lif->ionic->idev;
754 	struct ionic_cq *cq = &lif->notifyqcq->cq;
755 	u32 work_done;
756 
757 	work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
758 				     NULL, NULL);
759 	if (work_done)
760 		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
761 				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
762 
763 	return work_done;
764 }
765 
766 static bool ionic_adminq_service(struct ionic_cq *cq,
767 				 struct ionic_cq_info *cq_info)
768 {
769 	struct ionic_admin_comp *comp = cq_info->cq_desc;
770 
771 	if (!color_match(comp->color, cq->done_color))
772 		return false;
773 
774 	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
775 
776 	return true;
777 }
778 
779 static int ionic_adminq_napi(struct napi_struct *napi, int budget)
780 {
781 	struct ionic_lif *lif = napi_to_cq(napi)->lif;
782 	int n_work = 0;
783 	int a_work = 0;
784 
785 	if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
786 		n_work = ionic_notifyq_clean(lif, budget);
787 	a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);
788 
789 	return max(n_work, a_work);
790 }
791 
792 void ionic_get_stats64(struct net_device *netdev,
793 		       struct rtnl_link_stats64 *ns)
794 {
795 	struct ionic_lif *lif = netdev_priv(netdev);
796 	struct ionic_lif_stats *ls;
797 
798 	memset(ns, 0, sizeof(*ns));
799 	ls = &lif->info->stats;
800 
801 	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
802 			 le64_to_cpu(ls->rx_mcast_packets) +
803 			 le64_to_cpu(ls->rx_bcast_packets);
804 
805 	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
806 			 le64_to_cpu(ls->tx_mcast_packets) +
807 			 le64_to_cpu(ls->tx_bcast_packets);
808 
809 	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
810 		       le64_to_cpu(ls->rx_mcast_bytes) +
811 		       le64_to_cpu(ls->rx_bcast_bytes);
812 
813 	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
814 		       le64_to_cpu(ls->tx_mcast_bytes) +
815 		       le64_to_cpu(ls->tx_bcast_bytes);
816 
817 	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
818 			 le64_to_cpu(ls->rx_mcast_drop_packets) +
819 			 le64_to_cpu(ls->rx_bcast_drop_packets);
820 
821 	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
822 			 le64_to_cpu(ls->tx_mcast_drop_packets) +
823 			 le64_to_cpu(ls->tx_bcast_drop_packets);
824 
825 	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
826 
827 	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
828 
829 	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
830 			       le64_to_cpu(ls->rx_queue_disabled) +
831 			       le64_to_cpu(ls->rx_desc_fetch_error) +
832 			       le64_to_cpu(ls->rx_desc_data_error);
833 
834 	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
835 				le64_to_cpu(ls->tx_queue_disabled) +
836 				le64_to_cpu(ls->tx_desc_fetch_error) +
837 				le64_to_cpu(ls->tx_desc_data_error);
838 
839 	ns->rx_errors = ns->rx_over_errors +
840 			ns->rx_missed_errors;
841 
842 	ns->tx_errors = ns->tx_aborted_errors;
843 }
844 
845 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
846 {
847 	struct ionic_admin_ctx ctx = {
848 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
849 		.cmd.rx_filter_add = {
850 			.opcode = IONIC_CMD_RX_FILTER_ADD,
851 			.lif_index = cpu_to_le16(lif->index),
852 			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
853 		},
854 	};
855 	struct ionic_rx_filter *f;
856 	int err;
857 
858 	/* don't bother if we already have it */
859 	spin_lock_bh(&lif->rx_filters.lock);
860 	f = ionic_rx_filter_by_addr(lif, addr);
861 	spin_unlock_bh(&lif->rx_filters.lock);
862 	if (f)
863 		return 0;
864 
865 	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
866 		   ctx.comp.rx_filter_add.filter_id);
867 
868 	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
869 	err = ionic_adminq_post_wait(lif, &ctx);
870 	if (err && err != -EEXIST)
871 		return err;
872 
873 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
874 }
875 
876 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
877 {
878 	struct ionic_admin_ctx ctx = {
879 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
880 		.cmd.rx_filter_del = {
881 			.opcode = IONIC_CMD_RX_FILTER_DEL,
882 			.lif_index = cpu_to_le16(lif->index),
883 		},
884 	};
885 	struct ionic_rx_filter *f;
886 	int err;
887 
888 	spin_lock_bh(&lif->rx_filters.lock);
889 	f = ionic_rx_filter_by_addr(lif, addr);
890 	if (!f) {
891 		spin_unlock_bh(&lif->rx_filters.lock);
892 		return -ENOENT;
893 	}
894 
895 	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
896 	ionic_rx_filter_free(lif, f);
897 	spin_unlock_bh(&lif->rx_filters.lock);
898 
899 	err = ionic_adminq_post_wait(lif, &ctx);
900 	if (err && err != -EEXIST)
901 		return err;
902 
903 	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
904 		   ctx.cmd.rx_filter_del.filter_id);
905 
906 	return 0;
907 }
908 
909 static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
910 {
911 	struct ionic *ionic = lif->ionic;
912 	struct ionic_deferred_work *work;
913 	unsigned int nmfilters;
914 	unsigned int nufilters;
915 
916 	if (add) {
917 		/* Do we have space for this filter?  We test the counters
918 		 * here before checking the need for deferral so that we
919 		 * can return an overflow error to the stack.
920 		 */
921 		nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
922 		nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);
923 
924 		if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
925 			lif->nmcast++;
926 		else if (!is_multicast_ether_addr(addr) &&
927 			 lif->nucast < nufilters)
928 			lif->nucast++;
929 		else
930 			return -ENOSPC;
931 	} else {
932 		if (is_multicast_ether_addr(addr) && lif->nmcast)
933 			lif->nmcast--;
934 		else if (!is_multicast_ether_addr(addr) && lif->nucast)
935 			lif->nucast--;
936 	}
937 
938 	if (in_interrupt()) {
939 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
940 		if (!work) {
941 			netdev_err(lif->netdev, "%s OOM\n", __func__);
942 			return -ENOMEM;
943 		}
944 		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
945 				   IONIC_DW_TYPE_RX_ADDR_DEL;
946 		memcpy(work->addr, addr, ETH_ALEN);
947 		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
948 			   add ? "add" : "del", addr);
949 		ionic_lif_deferred_enqueue(&lif->deferred, work);
950 	} else {
951 		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
952 			   add ? "add" : "del", addr);
953 		if (add)
954 			return ionic_lif_addr_add(lif, addr);
955 		else
956 			return ionic_lif_addr_del(lif, addr);
957 	}
958 
959 	return 0;
960 }
961 
962 static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
963 {
964 	return ionic_lif_addr(netdev_priv(netdev), addr, true);
965 }
966 
967 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
968 {
969 	return ionic_lif_addr(netdev_priv(netdev), addr, false);
970 }
971 
972 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
973 {
974 	struct ionic_admin_ctx ctx = {
975 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
976 		.cmd.rx_mode_set = {
977 			.opcode = IONIC_CMD_RX_MODE_SET,
978 			.lif_index = cpu_to_le16(lif->index),
979 			.rx_mode = cpu_to_le16(rx_mode),
980 		},
981 	};
982 	char buf[128];
983 	int err;
984 	int i;
985 #define REMAIN(__x) (sizeof(buf) - (__x))
986 
987 	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
988 		      lif->rx_mode, rx_mode);
989 	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
990 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
991 	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
992 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
993 	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
994 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
995 	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
996 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
997 	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
998 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
999 	netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
1000 
1001 	err = ionic_adminq_post_wait(lif, &ctx);
1002 	if (err)
1003 		netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
1004 			    rx_mode, err);
1005 	else
1006 		lif->rx_mode = rx_mode;
1007 }
1008 
1009 static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
1010 {
1011 	struct ionic_deferred_work *work;
1012 
1013 	if (in_interrupt()) {
1014 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
1015 		if (!work) {
1016 			netdev_err(lif->netdev, "%s OOM\n", __func__);
1017 			return;
1018 		}
1019 		work->type = IONIC_DW_TYPE_RX_MODE;
1020 		work->rx_mode = rx_mode;
1021 		netdev_dbg(lif->netdev, "deferred: rx_mode\n");
1022 		ionic_lif_deferred_enqueue(&lif->deferred, work);
1023 	} else {
1024 		ionic_lif_rx_mode(lif, rx_mode);
1025 	}
1026 }
1027 
1028 static void ionic_set_rx_mode(struct net_device *netdev)
1029 {
1030 	struct ionic_lif *lif = netdev_priv(netdev);
1031 	struct ionic_identity *ident;
1032 	unsigned int nfilters;
1033 	unsigned int rx_mode;
1034 
1035 	ident = &lif->ionic->ident;
1036 
1037 	rx_mode = IONIC_RX_MODE_F_UNICAST;
1038 	rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
1039 	rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
1040 	rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
1041 	rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
1042 
1043 	/* sync unicast addresses
1044 	 * next check to see if we're in an overflow state
1045 	 *    if so, we track that we overflowed and enable NIC PROMISC
1046 	 *    else if the overflow is set and not needed
1047 	 *       we remove our overflow flag and check the netdev flags
1048 	 *       to see if we can disable NIC PROMISC
1049 	 */
1050 	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
1051 	nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
1052 	if (netdev_uc_count(netdev) + 1 > nfilters) {
1053 		rx_mode |= IONIC_RX_MODE_F_PROMISC;
1054 		lif->uc_overflow = true;
1055 	} else if (lif->uc_overflow) {
1056 		lif->uc_overflow = false;
1057 		if (!(netdev->flags & IFF_PROMISC))
1058 			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
1059 	}
1060 
1061 	/* same for multicast */
1062 	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
1063 	nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
1064 	if (netdev_mc_count(netdev) > nfilters) {
1065 		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
1066 		lif->mc_overflow = true;
1067 	} else if (lif->mc_overflow) {
1068 		lif->mc_overflow = false;
1069 		if (!(netdev->flags & IFF_ALLMULTI))
1070 			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
1071 	}
1072 
1073 	if (lif->rx_mode != rx_mode)
1074 		_ionic_lif_rx_mode(lif, rx_mode);
1075 }
1076 
1077 static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1078 {
1079 	u64 wanted = 0;
1080 
1081 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
1082 		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1083 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1084 		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1085 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1086 		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1087 	if (features & NETIF_F_RXHASH)
1088 		wanted |= IONIC_ETH_HW_RX_HASH;
1089 	if (features & NETIF_F_RXCSUM)
1090 		wanted |= IONIC_ETH_HW_RX_CSUM;
1091 	if (features & NETIF_F_SG)
1092 		wanted |= IONIC_ETH_HW_TX_SG;
1093 	if (features & NETIF_F_HW_CSUM)
1094 		wanted |= IONIC_ETH_HW_TX_CSUM;
1095 	if (features & NETIF_F_TSO)
1096 		wanted |= IONIC_ETH_HW_TSO;
1097 	if (features & NETIF_F_TSO6)
1098 		wanted |= IONIC_ETH_HW_TSO_IPV6;
1099 	if (features & NETIF_F_TSO_ECN)
1100 		wanted |= IONIC_ETH_HW_TSO_ECN;
1101 	if (features & NETIF_F_GSO_GRE)
1102 		wanted |= IONIC_ETH_HW_TSO_GRE;
1103 	if (features & NETIF_F_GSO_GRE_CSUM)
1104 		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1105 	if (features & NETIF_F_GSO_IPXIP4)
1106 		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1107 	if (features & NETIF_F_GSO_IPXIP6)
1108 		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1109 	if (features & NETIF_F_GSO_UDP_TUNNEL)
1110 		wanted |= IONIC_ETH_HW_TSO_UDP;
1111 	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1112 		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1113 
1114 	return cpu_to_le64(wanted);
1115 }
1116 
1117 static int ionic_set_nic_features(struct ionic_lif *lif,
1118 				  netdev_features_t features)
1119 {
1120 	struct device *dev = lif->ionic->dev;
1121 	struct ionic_admin_ctx ctx = {
1122 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1123 		.cmd.lif_setattr = {
1124 			.opcode = IONIC_CMD_LIF_SETATTR,
1125 			.index = cpu_to_le16(lif->index),
1126 			.attr = IONIC_LIF_ATTR_FEATURES,
1127 		},
1128 	};
1129 	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
1130 			 IONIC_ETH_HW_VLAN_RX_STRIP |
1131 			 IONIC_ETH_HW_VLAN_RX_FILTER;
1132 	u64 old_hw_features;
1133 	int err;
1134 
1135 	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
1136 	err = ionic_adminq_post_wait(lif, &ctx);
1137 	if (err)
1138 		return err;
1139 
1140 	old_hw_features = lif->hw_features;
1141 	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
1142 				       ctx.comp.lif_setattr.features);
1143 
1144 	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
1145 		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1146 
1147 	if ((vlan_flags & features) &&
1148 	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
1149 		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
1150 
1151 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1152 		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
1153 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1154 		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
1155 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1156 		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
1157 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1158 		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
1159 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1160 		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
1161 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1162 		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
1163 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1164 		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
1165 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1166 		dev_dbg(dev, "feature ETH_HW_TSO\n");
1167 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1168 		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
1169 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1170 		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
1171 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1172 		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
1173 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1174 		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
1175 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1176 		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
1177 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1178 		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
1179 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1180 		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
1181 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1182 		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
1183 
1184 	return 0;
1185 }
1186 
1187 static int ionic_init_nic_features(struct ionic_lif *lif)
1188 {
1189 	struct net_device *netdev = lif->netdev;
1190 	netdev_features_t features;
1191 	int err;
1192 
1193 	/* set up what we expect to support by default */
1194 	features = NETIF_F_HW_VLAN_CTAG_TX |
1195 		   NETIF_F_HW_VLAN_CTAG_RX |
1196 		   NETIF_F_HW_VLAN_CTAG_FILTER |
1197 		   NETIF_F_RXHASH |
1198 		   NETIF_F_SG |
1199 		   NETIF_F_HW_CSUM |
1200 		   NETIF_F_RXCSUM |
1201 		   NETIF_F_TSO |
1202 		   NETIF_F_TSO6 |
1203 		   NETIF_F_TSO_ECN;
1204 
1205 	err = ionic_set_nic_features(lif, features);
1206 	if (err)
1207 		return err;
1208 
1209 	/* tell the netdev what we actually can support */
1210 	netdev->features |= NETIF_F_HIGHDMA;
1211 
1212 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1213 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
1214 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1215 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1216 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1217 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1218 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1219 		netdev->hw_features |= NETIF_F_RXHASH;
1220 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1221 		netdev->hw_features |= NETIF_F_SG;
1222 
1223 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1224 		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
1225 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1226 		netdev->hw_enc_features |= NETIF_F_RXCSUM;
1227 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1228 		netdev->hw_enc_features |= NETIF_F_TSO;
1229 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1230 		netdev->hw_enc_features |= NETIF_F_TSO6;
1231 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1232 		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
1233 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1234 		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
1235 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1236 		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
1237 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1238 		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
1239 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1240 		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
1241 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1242 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1243 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1244 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1245 
1246 	netdev->hw_features |= netdev->hw_enc_features;
1247 	netdev->features |= netdev->hw_features;
1248 
1249 	netdev->priv_flags |= IFF_UNICAST_FLT |
1250 			      IFF_LIVE_ADDR_CHANGE;
1251 
1252 	return 0;
1253 }
1254 
1255 static int ionic_set_features(struct net_device *netdev,
1256 			      netdev_features_t features)
1257 {
1258 	struct ionic_lif *lif = netdev_priv(netdev);
1259 	int err;
1260 
1261 	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1262 		   __func__, (u64)lif->netdev->features, (u64)features);
1263 
1264 	err = ionic_set_nic_features(lif, features);
1265 
1266 	return err;
1267 }
1268 
1269 static int ionic_set_mac_address(struct net_device *netdev, void *sa)
1270 {
1271 	struct sockaddr *addr = sa;
1272 	u8 *mac;
1273 	int err;
1274 
1275 	mac = (u8 *)addr->sa_data;
1276 	if (ether_addr_equal(netdev->dev_addr, mac))
1277 		return 0;
1278 
1279 	err = eth_prepare_mac_addr_change(netdev, addr);
1280 	if (err)
1281 		return err;
1282 
1283 	if (!is_zero_ether_addr(netdev->dev_addr)) {
1284 		netdev_info(netdev, "deleting mac addr %pM\n",
1285 			    netdev->dev_addr);
1286 		ionic_addr_del(netdev, netdev->dev_addr);
1287 	}
1288 
1289 	eth_commit_mac_addr_change(netdev, addr);
1290 	netdev_info(netdev, "updating mac addr %pM\n", mac);
1291 
1292 	return ionic_addr_add(netdev, mac);
1293 }
1294 
1295 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1296 {
1297 	struct ionic_lif *lif = netdev_priv(netdev);
1298 	struct ionic_admin_ctx ctx = {
1299 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1300 		.cmd.lif_setattr = {
1301 			.opcode = IONIC_CMD_LIF_SETATTR,
1302 			.index = cpu_to_le16(lif->index),
1303 			.attr = IONIC_LIF_ATTR_MTU,
1304 			.mtu = cpu_to_le32(new_mtu),
1305 		},
1306 	};
1307 	int err;
1308 
1309 	err = ionic_adminq_post_wait(lif, &ctx);
1310 	if (err)
1311 		return err;
1312 
1313 	netdev->mtu = new_mtu;
1314 	err = ionic_reset_queues(lif);
1315 
1316 	return err;
1317 }
1318 
1319 static void ionic_tx_timeout_work(struct work_struct *ws)
1320 {
1321 	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1322 
1323 	netdev_info(lif->netdev, "Tx Timeout recovery\n");
1324 
1325 	rtnl_lock();
1326 	ionic_reset_queues(lif);
1327 	rtnl_unlock();
1328 }
1329 
1330 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1331 {
1332 	struct ionic_lif *lif = netdev_priv(netdev);
1333 
1334 	schedule_work(&lif->tx_timeout_work);
1335 }
1336 
1337 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1338 				 u16 vid)
1339 {
1340 	struct ionic_lif *lif = netdev_priv(netdev);
1341 	struct ionic_admin_ctx ctx = {
1342 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1343 		.cmd.rx_filter_add = {
1344 			.opcode = IONIC_CMD_RX_FILTER_ADD,
1345 			.lif_index = cpu_to_le16(lif->index),
1346 			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
1347 			.vlan.vlan = cpu_to_le16(vid),
1348 		},
1349 	};
1350 	int err;
1351 
1352 	err = ionic_adminq_post_wait(lif, &ctx);
1353 	if (err)
1354 		return err;
1355 
1356 	netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
1357 		   ctx.comp.rx_filter_add.filter_id);
1358 
1359 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
1360 }
1361 
1362 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1363 				  u16 vid)
1364 {
1365 	struct ionic_lif *lif = netdev_priv(netdev);
1366 	struct ionic_admin_ctx ctx = {
1367 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1368 		.cmd.rx_filter_del = {
1369 			.opcode = IONIC_CMD_RX_FILTER_DEL,
1370 			.lif_index = cpu_to_le16(lif->index),
1371 		},
1372 	};
1373 	struct ionic_rx_filter *f;
1374 
1375 	spin_lock_bh(&lif->rx_filters.lock);
1376 
1377 	f = ionic_rx_filter_by_vlan(lif, vid);
1378 	if (!f) {
1379 		spin_unlock_bh(&lif->rx_filters.lock);
1380 		return -ENOENT;
1381 	}
1382 
1383 	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
1384 		   le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));
1385 
1386 	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
1387 	ionic_rx_filter_free(lif, f);
1388 	spin_unlock_bh(&lif->rx_filters.lock);
1389 
1390 	return ionic_adminq_post_wait(lif, &ctx);
1391 }
1392 
1393 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1394 			 const u8 *key, const u32 *indir)
1395 {
1396 	struct ionic_admin_ctx ctx = {
1397 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1398 		.cmd.lif_setattr = {
1399 			.opcode = IONIC_CMD_LIF_SETATTR,
1400 			.attr = IONIC_LIF_ATTR_RSS,
1401 			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1402 		},
1403 	};
1404 	unsigned int i, tbl_sz;
1405 
1406 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
1407 		lif->rss_types = types;
1408 		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
1409 	}
1410 
1411 	if (key)
1412 		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1413 
1414 	if (indir) {
1415 		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1416 		for (i = 0; i < tbl_sz; i++)
1417 			lif->rss_ind_tbl[i] = indir[i];
1418 	}
1419 
1420 	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1421 	       IONIC_RSS_HASH_KEY_SIZE);
1422 
1423 	return ionic_adminq_post_wait(lif, &ctx);
1424 }
1425 
1426 static int ionic_lif_rss_init(struct ionic_lif *lif)
1427 {
1428 	unsigned int tbl_sz;
1429 	unsigned int i;
1430 
1431 	lif->rss_types = IONIC_RSS_TYPE_IPV4     |
1432 			 IONIC_RSS_TYPE_IPV4_TCP |
1433 			 IONIC_RSS_TYPE_IPV4_UDP |
1434 			 IONIC_RSS_TYPE_IPV6     |
1435 			 IONIC_RSS_TYPE_IPV6_TCP |
1436 			 IONIC_RSS_TYPE_IPV6_UDP;
1437 
1438 	/* Fill indirection table with 'default' values */
1439 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1440 	for (i = 0; i < tbl_sz; i++)
1441 		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1442 
1443 	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1444 }
1445 
1446 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1447 {
1448 	int tbl_sz;
1449 
1450 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1451 	memset(lif->rss_ind_tbl, 0, tbl_sz);
1452 	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1453 
1454 	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1455 }
1456 
1457 static void ionic_txrx_disable(struct ionic_lif *lif)
1458 {
1459 	unsigned int i;
1460 	int err;
1461 
1462 	if (lif->txqcqs) {
1463 		for (i = 0; i < lif->nxqs; i++) {
1464 			err = ionic_qcq_disable(lif->txqcqs[i].qcq);
1465 			if (err == -ETIMEDOUT)
1466 				break;
1467 		}
1468 	}
1469 
1470 	if (lif->rxqcqs) {
1471 		for (i = 0; i < lif->nxqs; i++) {
1472 			err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
1473 			if (err == -ETIMEDOUT)
1474 				break;
1475 		}
1476 	}
1477 }
1478 
1479 static void ionic_txrx_deinit(struct ionic_lif *lif)
1480 {
1481 	unsigned int i;
1482 
1483 	if (lif->txqcqs) {
1484 		for (i = 0; i < lif->nxqs; i++) {
1485 			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1486 			ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
1487 			ionic_tx_empty(&lif->txqcqs[i].qcq->q);
1488 		}
1489 	}
1490 
1491 	if (lif->rxqcqs) {
1492 		for (i = 0; i < lif->nxqs; i++) {
1493 			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1494 			ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
1495 			ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
1496 		}
1497 	}
1498 	lif->rx_mode = 0;
1499 }
1500 
1501 static void ionic_txrx_free(struct ionic_lif *lif)
1502 {
1503 	unsigned int i;
1504 
1505 	if (lif->txqcqs) {
1506 		for (i = 0; i < lif->nxqs; i++) {
1507 			ionic_qcq_free(lif, lif->txqcqs[i].qcq);
1508 			lif->txqcqs[i].qcq = NULL;
1509 		}
1510 	}
1511 
1512 	if (lif->rxqcqs) {
1513 		for (i = 0; i < lif->nxqs; i++) {
1514 			ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
1515 			lif->rxqcqs[i].qcq = NULL;
1516 		}
1517 	}
1518 }
1519 
1520 static int ionic_txrx_alloc(struct ionic_lif *lif)
1521 {
1522 	unsigned int sg_desc_sz;
1523 	unsigned int flags;
1524 	unsigned int i;
1525 	int err = 0;
1526 
1527 	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
1528 	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
1529 					  sizeof(struct ionic_txq_sg_desc_v1))
1530 		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
1531 	else
1532 		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
1533 
1534 	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
1535 	for (i = 0; i < lif->nxqs; i++) {
1536 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
1537 				      lif->ntxq_descs,
1538 				      sizeof(struct ionic_txq_desc),
1539 				      sizeof(struct ionic_txq_comp),
1540 				      sg_desc_sz,
1541 				      lif->kern_pid, &lif->txqcqs[i].qcq);
1542 		if (err)
1543 			goto err_out;
1544 
1545 		lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
1546 		ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq);
1547 	}
1548 
1549 	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
1550 	for (i = 0; i < lif->nxqs; i++) {
1551 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
1552 				      lif->nrxq_descs,
1553 				      sizeof(struct ionic_rxq_desc),
1554 				      sizeof(struct ionic_rxq_comp),
1555 				      sizeof(struct ionic_rxq_sg_desc),
1556 				      lif->kern_pid, &lif->rxqcqs[i].qcq);
1557 		if (err)
1558 			goto err_out;
1559 
1560 		lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
1561 
1562 		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
1563 				     lif->rxqcqs[i].qcq->intr.index,
1564 				     lif->rx_coalesce_hw);
1565 		ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
1566 					  lif->txqcqs[i].qcq);
1567 		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq);
1568 	}
1569 
1570 	return 0;
1571 
1572 err_out:
1573 	ionic_txrx_free(lif);
1574 
1575 	return err;
1576 }
1577 
1578 static int ionic_txrx_init(struct ionic_lif *lif)
1579 {
1580 	unsigned int i;
1581 	int err;
1582 
1583 	for (i = 0; i < lif->nxqs; i++) {
1584 		err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
1585 		if (err)
1586 			goto err_out;
1587 
1588 		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
1589 		if (err) {
1590 			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1591 			goto err_out;
1592 		}
1593 	}
1594 
1595 	if (lif->netdev->features & NETIF_F_RXHASH)
1596 		ionic_lif_rss_init(lif);
1597 
1598 	ionic_set_rx_mode(lif->netdev);
1599 
1600 	return 0;
1601 
1602 err_out:
1603 	while (i--) {
1604 		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1605 		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1606 	}
1607 
1608 	return err;
1609 }
1610 
1611 static int ionic_txrx_enable(struct ionic_lif *lif)
1612 {
1613 	int i, err;
1614 
1615 	for (i = 0; i < lif->nxqs; i++) {
1616 		ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
1617 		err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
1618 		if (err)
1619 			goto err_out;
1620 
1621 		err = ionic_qcq_enable(lif->txqcqs[i].qcq);
1622 		if (err) {
1623 			if (err != -ETIMEDOUT)
1624 				ionic_qcq_disable(lif->rxqcqs[i].qcq);
1625 			goto err_out;
1626 		}
1627 	}
1628 
1629 	return 0;
1630 
1631 err_out:
1632 	while (i--) {
1633 		err = ionic_qcq_disable(lif->txqcqs[i].qcq);
1634 		if (err == -ETIMEDOUT)
1635 			break;
1636 		err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
1637 		if (err == -ETIMEDOUT)
1638 			break;
1639 	}
1640 
1641 	return err;
1642 }
1643 
1644 static int ionic_start_queues(struct ionic_lif *lif)
1645 {
1646 	int err;
1647 
1648 	if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
1649 		return 0;
1650 
1651 	err = ionic_txrx_enable(lif);
1652 	if (err) {
1653 		clear_bit(IONIC_LIF_F_UP, lif->state);
1654 		return err;
1655 	}
1656 	netif_tx_wake_all_queues(lif->netdev);
1657 
1658 	return 0;
1659 }
1660 
1661 int ionic_open(struct net_device *netdev)
1662 {
1663 	struct ionic_lif *lif = netdev_priv(netdev);
1664 	int err;
1665 
1666 	err = ionic_txrx_alloc(lif);
1667 	if (err)
1668 		return err;
1669 
1670 	err = ionic_txrx_init(lif);
1671 	if (err)
1672 		goto err_out;
1673 
1674 	/* don't start the queues until we have link */
1675 	if (netif_carrier_ok(netdev)) {
1676 		err = ionic_start_queues(lif);
1677 		if (err)
1678 			goto err_txrx_deinit;
1679 	}
1680 
1681 	return 0;
1682 
1683 err_txrx_deinit:
1684 	ionic_txrx_deinit(lif);
1685 err_out:
1686 	ionic_txrx_free(lif);
1687 	return err;
1688 }
1689 
1690 static void ionic_stop_queues(struct ionic_lif *lif)
1691 {
1692 	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
1693 		return;
1694 
1695 	ionic_txrx_disable(lif);
1696 	netif_tx_disable(lif->netdev);
1697 }
1698 
1699 int ionic_stop(struct net_device *netdev)
1700 {
1701 	struct ionic_lif *lif = netdev_priv(netdev);
1702 
1703 	if (!netif_device_present(netdev))
1704 		return 0;
1705 
1706 	ionic_stop_queues(lif);
1707 	ionic_txrx_deinit(lif);
1708 	ionic_txrx_free(lif);
1709 
1710 	return 0;
1711 }
1712 
1713 static int ionic_get_vf_config(struct net_device *netdev,
1714 			       int vf, struct ifla_vf_info *ivf)
1715 {
1716 	struct ionic_lif *lif = netdev_priv(netdev);
1717 	struct ionic *ionic = lif->ionic;
1718 	int ret = 0;
1719 
1720 	if (!netif_device_present(netdev))
1721 		return -EBUSY;
1722 
1723 	down_read(&ionic->vf_op_lock);
1724 
1725 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1726 		ret = -EINVAL;
1727 	} else {
1728 		ivf->vf           = vf;
1729 		ivf->vlan         = ionic->vfs[vf].vlanid;
1730 		ivf->qos	  = 0;
1731 		ivf->spoofchk     = ionic->vfs[vf].spoofchk;
1732 		ivf->linkstate    = ionic->vfs[vf].linkstate;
1733 		ivf->max_tx_rate  = ionic->vfs[vf].maxrate;
1734 		ivf->trusted      = ionic->vfs[vf].trusted;
1735 		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
1736 	}
1737 
1738 	up_read(&ionic->vf_op_lock);
1739 	return ret;
1740 }
1741 
1742 static int ionic_get_vf_stats(struct net_device *netdev, int vf,
1743 			      struct ifla_vf_stats *vf_stats)
1744 {
1745 	struct ionic_lif *lif = netdev_priv(netdev);
1746 	struct ionic *ionic = lif->ionic;
1747 	struct ionic_lif_stats *vs;
1748 	int ret = 0;
1749 
1750 	if (!netif_device_present(netdev))
1751 		return -EBUSY;
1752 
1753 	down_read(&ionic->vf_op_lock);
1754 
1755 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1756 		ret = -EINVAL;
1757 	} else {
1758 		memset(vf_stats, 0, sizeof(*vf_stats));
1759 		vs = &ionic->vfs[vf].stats;
1760 
1761 		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
1762 		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
1763 		vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
1764 		vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
1765 		vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
1766 		vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
1767 		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
1768 				       le64_to_cpu(vs->rx_mcast_drop_packets) +
1769 				       le64_to_cpu(vs->rx_bcast_drop_packets);
1770 		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
1771 				       le64_to_cpu(vs->tx_mcast_drop_packets) +
1772 				       le64_to_cpu(vs->tx_bcast_drop_packets);
1773 	}
1774 
1775 	up_read(&ionic->vf_op_lock);
1776 	return ret;
1777 }
1778 
1779 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1780 {
1781 	struct ionic_lif *lif = netdev_priv(netdev);
1782 	struct ionic *ionic = lif->ionic;
1783 	int ret;
1784 
1785 	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
1786 		return -EINVAL;
1787 
1788 	if (!netif_device_present(netdev))
1789 		return -EBUSY;
1790 
1791 	down_write(&ionic->vf_op_lock);
1792 
1793 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1794 		ret = -EINVAL;
1795 	} else {
1796 		ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
1797 		if (!ret)
1798 			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
1799 	}
1800 
1801 	up_write(&ionic->vf_op_lock);
1802 	return ret;
1803 }
1804 
1805 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1806 			     u8 qos, __be16 proto)
1807 {
1808 	struct ionic_lif *lif = netdev_priv(netdev);
1809 	struct ionic *ionic = lif->ionic;
1810 	int ret;
1811 
1812 	/* until someday when we support qos */
1813 	if (qos)
1814 		return -EINVAL;
1815 
1816 	if (vlan > 4095)
1817 		return -EINVAL;
1818 
1819 	if (proto != htons(ETH_P_8021Q))
1820 		return -EPROTONOSUPPORT;
1821 
1822 	if (!netif_device_present(netdev))
1823 		return -EBUSY;
1824 
1825 	down_write(&ionic->vf_op_lock);
1826 
1827 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1828 		ret = -EINVAL;
1829 	} else {
1830 		ret = ionic_set_vf_config(ionic, vf,
1831 					  IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
1832 		if (!ret)
1833 			ionic->vfs[vf].vlanid = vlan;
1834 	}
1835 
1836 	up_write(&ionic->vf_op_lock);
1837 	return ret;
1838 }
1839 
1840 static int ionic_set_vf_rate(struct net_device *netdev, int vf,
1841 			     int tx_min, int tx_max)
1842 {
1843 	struct ionic_lif *lif = netdev_priv(netdev);
1844 	struct ionic *ionic = lif->ionic;
1845 	int ret;
1846 
1847 	/* setting the min just seems silly */
1848 	if (tx_min)
1849 		return -EINVAL;
1850 
1851 	if (!netif_device_present(netdev))
1852 		return -EBUSY;
1853 
1854 	down_write(&ionic->vf_op_lock);
1855 
1856 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1857 		ret = -EINVAL;
1858 	} else {
1859 		ret = ionic_set_vf_config(ionic, vf,
1860 					  IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
1861 		if (!ret)
1862 			lif->ionic->vfs[vf].maxrate = tx_max;
1863 	}
1864 
1865 	up_write(&ionic->vf_op_lock);
1866 	return ret;
1867 }
1868 
1869 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
1870 {
1871 	struct ionic_lif *lif = netdev_priv(netdev);
1872 	struct ionic *ionic = lif->ionic;
1873 	u8 data = set;  /* convert to u8 for config */
1874 	int ret;
1875 
1876 	if (!netif_device_present(netdev))
1877 		return -EBUSY;
1878 
1879 	down_write(&ionic->vf_op_lock);
1880 
1881 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1882 		ret = -EINVAL;
1883 	} else {
1884 		ret = ionic_set_vf_config(ionic, vf,
1885 					  IONIC_VF_ATTR_SPOOFCHK, &data);
1886 		if (!ret)
1887 			ionic->vfs[vf].spoofchk = data;
1888 	}
1889 
1890 	up_write(&ionic->vf_op_lock);
1891 	return ret;
1892 }
1893 
1894 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
1895 {
1896 	struct ionic_lif *lif = netdev_priv(netdev);
1897 	struct ionic *ionic = lif->ionic;
1898 	u8 data = set;  /* convert to u8 for config */
1899 	int ret;
1900 
1901 	if (!netif_device_present(netdev))
1902 		return -EBUSY;
1903 
1904 	down_write(&ionic->vf_op_lock);
1905 
1906 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1907 		ret = -EINVAL;
1908 	} else {
1909 		ret = ionic_set_vf_config(ionic, vf,
1910 					  IONIC_VF_ATTR_TRUST, &data);
1911 		if (!ret)
1912 			ionic->vfs[vf].trusted = data;
1913 	}
1914 
1915 	up_write(&ionic->vf_op_lock);
1916 	return ret;
1917 }
1918 
1919 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
1920 {
1921 	struct ionic_lif *lif = netdev_priv(netdev);
1922 	struct ionic *ionic = lif->ionic;
1923 	u8 data;
1924 	int ret;
1925 
1926 	switch (set) {
1927 	case IFLA_VF_LINK_STATE_ENABLE:
1928 		data = IONIC_VF_LINK_STATUS_UP;
1929 		break;
1930 	case IFLA_VF_LINK_STATE_DISABLE:
1931 		data = IONIC_VF_LINK_STATUS_DOWN;
1932 		break;
1933 	case IFLA_VF_LINK_STATE_AUTO:
1934 		data = IONIC_VF_LINK_STATUS_AUTO;
1935 		break;
1936 	default:
1937 		return -EINVAL;
1938 	}
1939 
1940 	if (!netif_device_present(netdev))
1941 		return -EBUSY;
1942 
1943 	down_write(&ionic->vf_op_lock);
1944 
1945 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1946 		ret = -EINVAL;
1947 	} else {
1948 		ret = ionic_set_vf_config(ionic, vf,
1949 					  IONIC_VF_ATTR_LINKSTATE, &data);
1950 		if (!ret)
1951 			ionic->vfs[vf].linkstate = set;
1952 	}
1953 
1954 	up_write(&ionic->vf_op_lock);
1955 	return ret;
1956 }
1957 
1958 static const struct net_device_ops ionic_netdev_ops = {
1959 	.ndo_open               = ionic_open,
1960 	.ndo_stop               = ionic_stop,
1961 	.ndo_start_xmit		= ionic_start_xmit,
1962 	.ndo_get_stats64	= ionic_get_stats64,
1963 	.ndo_set_rx_mode	= ionic_set_rx_mode,
1964 	.ndo_set_features	= ionic_set_features,
1965 	.ndo_set_mac_address	= ionic_set_mac_address,
1966 	.ndo_validate_addr	= eth_validate_addr,
1967 	.ndo_tx_timeout         = ionic_tx_timeout,
1968 	.ndo_change_mtu         = ionic_change_mtu,
1969 	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
1970 	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
1971 	.ndo_set_vf_vlan	= ionic_set_vf_vlan,
1972 	.ndo_set_vf_trust	= ionic_set_vf_trust,
1973 	.ndo_set_vf_mac		= ionic_set_vf_mac,
1974 	.ndo_set_vf_rate	= ionic_set_vf_rate,
1975 	.ndo_set_vf_spoofchk	= ionic_set_vf_spoofchk,
1976 	.ndo_get_vf_config	= ionic_get_vf_config,
1977 	.ndo_set_vf_link_state	= ionic_set_vf_link_state,
1978 	.ndo_get_vf_stats       = ionic_get_vf_stats,
1979 };
1980 
1981 int ionic_reset_queues(struct ionic_lif *lif)
1982 {
1983 	bool running;
1984 	int err = 0;
1985 
1986 	/* Put off the next watchdog timeout */
1987 	netif_trans_update(lif->netdev);
1988 
1989 	err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
1990 	if (err)
1991 		return err;
1992 
1993 	running = netif_running(lif->netdev);
1994 	if (running)
1995 		err = ionic_stop(lif->netdev);
1996 	if (!err && running)
1997 		ionic_open(lif->netdev);
1998 
1999 	clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
2000 
2001 	return err;
2002 }
2003 
2004 static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
2005 {
2006 	struct device *dev = ionic->dev;
2007 	struct net_device *netdev;
2008 	struct ionic_lif *lif;
2009 	int tbl_sz;
2010 	int err;
2011 
2012 	netdev = alloc_etherdev_mqs(sizeof(*lif),
2013 				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
2014 	if (!netdev) {
2015 		dev_err(dev, "Cannot allocate netdev, aborting\n");
2016 		return ERR_PTR(-ENOMEM);
2017 	}
2018 
2019 	SET_NETDEV_DEV(netdev, dev);
2020 
2021 	lif = netdev_priv(netdev);
2022 	lif->netdev = netdev;
2023 	ionic->master_lif = lif;
2024 	netdev->netdev_ops = &ionic_netdev_ops;
2025 	ionic_ethtool_set_ops(netdev);
2026 
2027 	netdev->watchdog_timeo = 2 * HZ;
2028 	netif_carrier_off(netdev);
2029 
2030 	netdev->min_mtu = IONIC_MIN_MTU;
2031 	netdev->max_mtu = IONIC_MAX_MTU;
2032 
2033 	lif->neqs = ionic->neqs_per_lif;
2034 	lif->nxqs = ionic->ntxqs_per_lif;
2035 
2036 	lif->ionic = ionic;
2037 	lif->index = index;
2038 	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
2039 	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
2040 
2041 	/* Convert the default coalesce value to actual hw resolution */
2042 	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
2043 	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
2044 						    lif->rx_coalesce_usecs);
2045 
2046 	snprintf(lif->name, sizeof(lif->name), "lif%u", index);
2047 
2048 	spin_lock_init(&lif->adminq_lock);
2049 
2050 	spin_lock_init(&lif->deferred.lock);
2051 	INIT_LIST_HEAD(&lif->deferred.list);
2052 	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
2053 
2054 	/* allocate lif info */
2055 	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
2056 	lif->info = dma_alloc_coherent(dev, lif->info_sz,
2057 				       &lif->info_pa, GFP_KERNEL);
2058 	if (!lif->info) {
2059 		dev_err(dev, "Failed to allocate lif info, aborting\n");
2060 		err = -ENOMEM;
2061 		goto err_out_free_netdev;
2062 	}
2063 
2064 	ionic_debugfs_add_lif(lif);
2065 
2066 	/* allocate queues */
2067 	err = ionic_qcqs_alloc(lif);
2068 	if (err)
2069 		goto err_out_free_lif_info;
2070 
2071 	/* allocate rss indirection table */
2072 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
2073 	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
2074 	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
2075 					      &lif->rss_ind_tbl_pa,
2076 					      GFP_KERNEL);
2077 
2078 	if (!lif->rss_ind_tbl) {
2079 		err = -ENOMEM;
2080 		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
2081 		goto err_out_free_qcqs;
2082 	}
2083 	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
2084 
2085 	list_add_tail(&lif->list, &ionic->lifs);
2086 
2087 	return lif;
2088 
2089 err_out_free_qcqs:
2090 	ionic_qcqs_free(lif);
2091 err_out_free_lif_info:
2092 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2093 	lif->info = NULL;
2094 	lif->info_pa = 0;
2095 err_out_free_netdev:
2096 	free_netdev(lif->netdev);
2097 	lif = NULL;
2098 
2099 	return ERR_PTR(err);
2100 }
2101 
2102 int ionic_lifs_alloc(struct ionic *ionic)
2103 {
2104 	struct ionic_lif *lif;
2105 
2106 	INIT_LIST_HEAD(&ionic->lifs);
2107 
2108 	/* only build the first lif, others are for later features */
2109 	set_bit(0, ionic->lifbits);
2110 
2111 	lif = ionic_lif_alloc(ionic, 0);
2112 	if (IS_ERR_OR_NULL(lif)) {
2113 		clear_bit(0, ionic->lifbits);
2114 		return -ENOMEM;
2115 	}
2116 
2117 	lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
2118 	ionic_lif_queue_identify(lif);
2119 
2120 	return 0;
2121 }
2122 
2123 static void ionic_lif_reset(struct ionic_lif *lif)
2124 {
2125 	struct ionic_dev *idev = &lif->ionic->idev;
2126 
2127 	mutex_lock(&lif->ionic->dev_cmd_lock);
2128 	ionic_dev_cmd_lif_reset(idev, lif->index);
2129 	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2130 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2131 }
2132 
2133 static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
2134 {
2135 	struct ionic *ionic = lif->ionic;
2136 
2137 	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
2138 		return;
2139 
2140 	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
2141 
2142 	netif_device_detach(lif->netdev);
2143 
2144 	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
2145 		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
2146 		ionic_stop_queues(lif);
2147 	}
2148 
2149 	if (netif_running(lif->netdev)) {
2150 		ionic_txrx_deinit(lif);
2151 		ionic_txrx_free(lif);
2152 	}
2153 	ionic_lifs_deinit(ionic);
2154 	ionic_reset(ionic);
2155 	ionic_qcqs_free(lif);
2156 
2157 	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
2158 }
2159 
2160 static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
2161 {
2162 	struct ionic *ionic = lif->ionic;
2163 	int err;
2164 
2165 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2166 		return;
2167 
2168 	dev_info(ionic->dev, "FW Up: restarting LIFs\n");
2169 
2170 	ionic_init_devinfo(ionic);
2171 	ionic_port_init(ionic);
2172 	err = ionic_qcqs_alloc(lif);
2173 	if (err)
2174 		goto err_out;
2175 
2176 	err = ionic_lifs_init(ionic);
2177 	if (err)
2178 		goto err_qcqs_free;
2179 
2180 	if (lif->registered)
2181 		ionic_lif_set_netdev_info(lif);
2182 
2183 	ionic_rx_filter_replay(lif);
2184 
2185 	if (netif_running(lif->netdev)) {
2186 		err = ionic_txrx_alloc(lif);
2187 		if (err)
2188 			goto err_lifs_deinit;
2189 
2190 		err = ionic_txrx_init(lif);
2191 		if (err)
2192 			goto err_txrx_free;
2193 	}
2194 
2195 	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
2196 	ionic_link_status_check_request(lif);
2197 	netif_device_attach(lif->netdev);
2198 	dev_info(ionic->dev, "FW Up: LIFs restarted\n");
2199 
2200 	return;
2201 
2202 err_txrx_free:
2203 	ionic_txrx_free(lif);
2204 err_lifs_deinit:
2205 	ionic_lifs_deinit(ionic);
2206 err_qcqs_free:
2207 	ionic_qcqs_free(lif);
2208 err_out:
2209 	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
2210 }
2211 
2212 static void ionic_lif_free(struct ionic_lif *lif)
2213 {
2214 	struct device *dev = lif->ionic->dev;
2215 
2216 	/* free rss indirection table */
2217 	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
2218 			  lif->rss_ind_tbl_pa);
2219 	lif->rss_ind_tbl = NULL;
2220 	lif->rss_ind_tbl_pa = 0;
2221 
2222 	/* free queues */
2223 	ionic_qcqs_free(lif);
2224 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2225 		ionic_lif_reset(lif);
2226 
2227 	/* free lif info */
2228 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2229 	lif->info = NULL;
2230 	lif->info_pa = 0;
2231 
2232 	/* unmap doorbell page */
2233 	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2234 	lif->kern_dbpage = NULL;
2235 	kfree(lif->dbid_inuse);
2236 	lif->dbid_inuse = NULL;
2237 
2238 	/* free netdev & lif */
2239 	ionic_debugfs_del_lif(lif);
2240 	list_del(&lif->list);
2241 	free_netdev(lif->netdev);
2242 }
2243 
2244 void ionic_lifs_free(struct ionic *ionic)
2245 {
2246 	struct list_head *cur, *tmp;
2247 	struct ionic_lif *lif;
2248 
2249 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2250 		lif = list_entry(cur, struct ionic_lif, list);
2251 
2252 		ionic_lif_free(lif);
2253 	}
2254 }
2255 
2256 static void ionic_lif_deinit(struct ionic_lif *lif)
2257 {
2258 	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
2259 		return;
2260 
2261 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2262 		cancel_work_sync(&lif->deferred.work);
2263 		cancel_work_sync(&lif->tx_timeout_work);
2264 		ionic_rx_filters_deinit(lif);
2265 	}
2266 
2267 	if (lif->netdev->features & NETIF_F_RXHASH)
2268 		ionic_lif_rss_deinit(lif);
2269 
2270 	napi_disable(&lif->adminqcq->napi);
2271 	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2272 	ionic_lif_qcq_deinit(lif, lif->adminqcq);
2273 
2274 	ionic_lif_reset(lif);
2275 }
2276 
2277 void ionic_lifs_deinit(struct ionic *ionic)
2278 {
2279 	struct list_head *cur, *tmp;
2280 	struct ionic_lif *lif;
2281 
2282 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2283 		lif = list_entry(cur, struct ionic_lif, list);
2284 		ionic_lif_deinit(lif);
2285 	}
2286 }
2287 
2288 static int ionic_lif_adminq_init(struct ionic_lif *lif)
2289 {
2290 	struct device *dev = lif->ionic->dev;
2291 	struct ionic_q_init_comp comp;
2292 	struct ionic_dev *idev;
2293 	struct ionic_qcq *qcq;
2294 	struct ionic_queue *q;
2295 	int err;
2296 
2297 	idev = &lif->ionic->idev;
2298 	qcq = lif->adminqcq;
2299 	q = &qcq->q;
2300 
2301 	mutex_lock(&lif->ionic->dev_cmd_lock);
2302 	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
2303 	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2304 	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2305 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2306 	if (err) {
2307 		netdev_err(lif->netdev, "adminq init failed %d\n", err);
2308 		return err;
2309 	}
2310 
2311 	q->hw_type = comp.hw_type;
2312 	q->hw_index = le32_to_cpu(comp.hw_index);
2313 	q->dbval = IONIC_DBELL_QID(q->hw_index);
2314 
2315 	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
2316 	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
2317 
2318 	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
2319 		       NAPI_POLL_WEIGHT);
2320 
2321 	napi_enable(&qcq->napi);
2322 
2323 	if (qcq->flags & IONIC_QCQ_F_INTR)
2324 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
2325 				IONIC_INTR_MASK_CLEAR);
2326 
2327 	qcq->flags |= IONIC_QCQ_F_INITED;
2328 
2329 	return 0;
2330 }
2331 
2332 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
2333 {
2334 	struct ionic_qcq *qcq = lif->notifyqcq;
2335 	struct device *dev = lif->ionic->dev;
2336 	struct ionic_queue *q = &qcq->q;
2337 	int err;
2338 
2339 	struct ionic_admin_ctx ctx = {
2340 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2341 		.cmd.q_init = {
2342 			.opcode = IONIC_CMD_Q_INIT,
2343 			.lif_index = cpu_to_le16(lif->index),
2344 			.type = q->type,
2345 			.ver = lif->qtype_info[q->type].version,
2346 			.index = cpu_to_le32(q->index),
2347 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
2348 					     IONIC_QINIT_F_ENA),
2349 			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
2350 			.pid = cpu_to_le16(q->pid),
2351 			.ring_size = ilog2(q->num_descs),
2352 			.ring_base = cpu_to_le64(q->base_pa),
2353 		}
2354 	};
2355 
2356 	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
2357 	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
2358 	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
2359 	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
2360 
2361 	err = ionic_adminq_post_wait(lif, &ctx);
2362 	if (err)
2363 		return err;
2364 
2365 	lif->last_eid = 0;
2366 	q->hw_type = ctx.comp.q_init.hw_type;
2367 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
2368 	q->dbval = IONIC_DBELL_QID(q->hw_index);
2369 
2370 	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
2371 	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
2372 
2373 	/* preset the callback info */
2374 	q->info[0].cb_arg = lif;
2375 
2376 	qcq->flags |= IONIC_QCQ_F_INITED;
2377 
2378 	return 0;
2379 }
2380 
2381 static int ionic_station_set(struct ionic_lif *lif)
2382 {
2383 	struct net_device *netdev = lif->netdev;
2384 	struct ionic_admin_ctx ctx = {
2385 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2386 		.cmd.lif_getattr = {
2387 			.opcode = IONIC_CMD_LIF_GETATTR,
2388 			.index = cpu_to_le16(lif->index),
2389 			.attr = IONIC_LIF_ATTR_MAC,
2390 		},
2391 	};
2392 	struct sockaddr addr;
2393 	int err;
2394 
2395 	err = ionic_adminq_post_wait(lif, &ctx);
2396 	if (err)
2397 		return err;
2398 	netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
2399 		   ctx.comp.lif_getattr.mac);
2400 	if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
2401 		return 0;
2402 
2403 	if (!is_zero_ether_addr(netdev->dev_addr)) {
2404 		/* If the netdev mac is non-zero and doesn't match the default
2405 		 * device address, it was set by something earlier and we're
2406 		 * likely here again after a fw-upgrade reset.  We need to be
2407 		 * sure the netdev mac is in our filter list.
2408 		 */
2409 		if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
2410 				      netdev->dev_addr))
2411 			ionic_lif_addr(lif, netdev->dev_addr, true);
2412 	} else {
2413 		/* Update the netdev mac with the device's mac */
2414 		memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
2415 		addr.sa_family = AF_INET;
2416 		err = eth_prepare_mac_addr_change(netdev, &addr);
2417 		if (err) {
2418 			netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
2419 				    addr.sa_data, err);
2420 			return 0;
2421 		}
2422 
2423 		eth_commit_mac_addr_change(netdev, &addr);
2424 	}
2425 
2426 	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
2427 		   netdev->dev_addr);
2428 	ionic_lif_addr(lif, netdev->dev_addr, true);
2429 
2430 	return 0;
2431 }
2432 
2433 static int ionic_lif_init(struct ionic_lif *lif)
2434 {
2435 	struct ionic_dev *idev = &lif->ionic->idev;
2436 	struct device *dev = lif->ionic->dev;
2437 	struct ionic_lif_init_comp comp;
2438 	int dbpage_num;
2439 	int err;
2440 
2441 	mutex_lock(&lif->ionic->dev_cmd_lock);
2442 	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
2443 	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2444 	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2445 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2446 	if (err)
2447 		return err;
2448 
2449 	lif->hw_index = le16_to_cpu(comp.hw_index);
2450 
2451 	/* now that we have the hw_index we can figure out our doorbell page */
2452 	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
2453 	if (!lif->dbid_count) {
2454 		dev_err(dev, "No doorbell pages, aborting\n");
2455 		return -EINVAL;
2456 	}
2457 
2458 	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
2459 	if (!lif->dbid_inuse) {
2460 		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
2461 		return -ENOMEM;
2462 	}
2463 
2464 	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
2465 	set_bit(0, lif->dbid_inuse);
2466 	lif->kern_pid = 0;
2467 
2468 	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
2469 	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
2470 	if (!lif->kern_dbpage) {
2471 		dev_err(dev, "Cannot map dbpage, aborting\n");
2472 		err = -ENOMEM;
2473 		goto err_out_free_dbid;
2474 	}
2475 
2476 	err = ionic_lif_adminq_init(lif);
2477 	if (err)
2478 		goto err_out_adminq_deinit;
2479 
2480 	if (lif->ionic->nnqs_per_lif) {
2481 		err = ionic_lif_notifyq_init(lif);
2482 		if (err)
2483 			goto err_out_notifyq_deinit;
2484 	}
2485 
2486 	err = ionic_init_nic_features(lif);
2487 	if (err)
2488 		goto err_out_notifyq_deinit;
2489 
2490 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2491 		err = ionic_rx_filters_init(lif);
2492 		if (err)
2493 			goto err_out_notifyq_deinit;
2494 	}
2495 
2496 	err = ionic_station_set(lif);
2497 	if (err)
2498 		goto err_out_notifyq_deinit;
2499 
2500 	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
2501 
2502 	set_bit(IONIC_LIF_F_INITED, lif->state);
2503 
2504 	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
2505 
2506 	return 0;
2507 
2508 err_out_notifyq_deinit:
2509 	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2510 err_out_adminq_deinit:
2511 	ionic_lif_qcq_deinit(lif, lif->adminqcq);
2512 	ionic_lif_reset(lif);
2513 	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2514 	lif->kern_dbpage = NULL;
2515 err_out_free_dbid:
2516 	kfree(lif->dbid_inuse);
2517 	lif->dbid_inuse = NULL;
2518 
2519 	return err;
2520 }
2521 
2522 int ionic_lifs_init(struct ionic *ionic)
2523 {
2524 	struct list_head *cur, *tmp;
2525 	struct ionic_lif *lif;
2526 	int err;
2527 
2528 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2529 		lif = list_entry(cur, struct ionic_lif, list);
2530 		err = ionic_lif_init(lif);
2531 		if (err)
2532 			return err;
2533 	}
2534 
2535 	return 0;
2536 }
2537 
2538 static void ionic_lif_notify_work(struct work_struct *ws)
2539 {
2540 }
2541 
2542 static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
2543 {
2544 	struct ionic_admin_ctx ctx = {
2545 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2546 		.cmd.lif_setattr = {
2547 			.opcode = IONIC_CMD_LIF_SETATTR,
2548 			.index = cpu_to_le16(lif->index),
2549 			.attr = IONIC_LIF_ATTR_NAME,
2550 		},
2551 	};
2552 
2553 	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
2554 		sizeof(ctx.cmd.lif_setattr.name));
2555 
2556 	ionic_adminq_post_wait(lif, &ctx);
2557 }
2558 
2559 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
2560 {
2561 	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
2562 		return NULL;
2563 
2564 	return netdev_priv(netdev);
2565 }
2566 
2567 static int ionic_lif_notify(struct notifier_block *nb,
2568 			    unsigned long event, void *info)
2569 {
2570 	struct net_device *ndev = netdev_notifier_info_to_dev(info);
2571 	struct ionic *ionic = container_of(nb, struct ionic, nb);
2572 	struct ionic_lif *lif = ionic_netdev_lif(ndev);
2573 
2574 	if (!lif || lif->ionic != ionic)
2575 		return NOTIFY_DONE;
2576 
2577 	switch (event) {
2578 	case NETDEV_CHANGENAME:
2579 		ionic_lif_set_netdev_info(lif);
2580 		break;
2581 	}
2582 
2583 	return NOTIFY_DONE;
2584 }
2585 
2586 int ionic_lifs_register(struct ionic *ionic)
2587 {
2588 	int err;
2589 
2590 	INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
2591 
2592 	ionic->nb.notifier_call = ionic_lif_notify;
2593 
2594 	err = register_netdevice_notifier(&ionic->nb);
2595 	if (err)
2596 		ionic->nb.notifier_call = NULL;
2597 
2598 	/* only register LIF0 for now */
2599 	err = register_netdev(ionic->master_lif->netdev);
2600 	if (err) {
2601 		dev_err(ionic->dev, "Cannot register net device, aborting\n");
2602 		return err;
2603 	}
2604 	ionic->master_lif->registered = true;
2605 
2606 	return 0;
2607 }
2608 
2609 void ionic_lifs_unregister(struct ionic *ionic)
2610 {
2611 	if (ionic->nb.notifier_call) {
2612 		unregister_netdevice_notifier(&ionic->nb);
2613 		cancel_work_sync(&ionic->nb_work);
2614 		ionic->nb.notifier_call = NULL;
2615 	}
2616 
2617 	/* There is only one lif ever registered in the
2618 	 * current model, so don't bother searching the
2619 	 * ionic->lif for candidates to unregister
2620 	 */
2621 	if (ionic->master_lif &&
2622 	    ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
2623 		unregister_netdev(ionic->master_lif->netdev);
2624 }
2625 
2626 static void ionic_lif_queue_identify(struct ionic_lif *lif)
2627 {
2628 	struct ionic *ionic = lif->ionic;
2629 	union ionic_q_identity *q_ident;
2630 	struct ionic_dev *idev;
2631 	int qtype;
2632 	int err;
2633 
2634 	idev = &lif->ionic->idev;
2635 	q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;
2636 
2637 	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
2638 		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
2639 
2640 		/* filter out the ones we know about */
2641 		switch (qtype) {
2642 		case IONIC_QTYPE_ADMINQ:
2643 		case IONIC_QTYPE_NOTIFYQ:
2644 		case IONIC_QTYPE_RXQ:
2645 		case IONIC_QTYPE_TXQ:
2646 			break;
2647 		default:
2648 			continue;
2649 		}
2650 
2651 		memset(qti, 0, sizeof(*qti));
2652 
2653 		mutex_lock(&ionic->dev_cmd_lock);
2654 		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
2655 					     ionic_qtype_versions[qtype]);
2656 		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
2657 		if (!err) {
2658 			qti->version   = q_ident->version;
2659 			qti->supported = q_ident->supported;
2660 			qti->features  = le64_to_cpu(q_ident->features);
2661 			qti->desc_sz   = le16_to_cpu(q_ident->desc_sz);
2662 			qti->comp_sz   = le16_to_cpu(q_ident->comp_sz);
2663 			qti->sg_desc_sz   = le16_to_cpu(q_ident->sg_desc_sz);
2664 			qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
2665 			qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
2666 		}
2667 		mutex_unlock(&ionic->dev_cmd_lock);
2668 
2669 		if (err == -EINVAL) {
2670 			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
2671 			continue;
2672 		} else if (err == -EIO) {
2673 			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
2674 			return;
2675 		} else if (err) {
2676 			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
2677 				qtype, err);
2678 			return;
2679 		}
2680 
2681 		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
2682 			qtype, qti->version);
2683 		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
2684 			qtype, qti->supported);
2685 		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
2686 			qtype, qti->features);
2687 		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
2688 			qtype, qti->desc_sz);
2689 		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
2690 			qtype, qti->comp_sz);
2691 		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
2692 			qtype, qti->sg_desc_sz);
2693 		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
2694 			qtype, qti->max_sg_elems);
2695 		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
2696 			qtype, qti->sg_desc_stride);
2697 	}
2698 }
2699 
2700 int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
2701 		       union ionic_lif_identity *lid)
2702 {
2703 	struct ionic_dev *idev = &ionic->idev;
2704 	size_t sz;
2705 	int err;
2706 
2707 	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
2708 
2709 	mutex_lock(&ionic->dev_cmd_lock);
2710 	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
2711 	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
2712 	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
2713 	mutex_unlock(&ionic->dev_cmd_lock);
2714 	if (err)
2715 		return (err);
2716 
2717 	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
2718 		le64_to_cpu(lid->capabilities));
2719 
2720 	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
2721 		le32_to_cpu(lid->eth.max_ucast_filters));
2722 	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
2723 		le32_to_cpu(lid->eth.max_mcast_filters));
2724 	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
2725 		le64_to_cpu(lid->eth.config.features));
2726 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
2727 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
2728 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
2729 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
2730 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
2731 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
2732 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
2733 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
2734 	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
2735 	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
2736 	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
2737 		le32_to_cpu(lid->eth.config.mtu));
2738 
2739 	return 0;
2740 }
2741 
2742 int ionic_lifs_size(struct ionic *ionic)
2743 {
2744 	struct ionic_identity *ident = &ionic->ident;
2745 	unsigned int nintrs, dev_nintrs;
2746 	union ionic_lif_config *lc;
2747 	unsigned int ntxqs_per_lif;
2748 	unsigned int nrxqs_per_lif;
2749 	unsigned int neqs_per_lif;
2750 	unsigned int nnqs_per_lif;
2751 	unsigned int nxqs, neqs;
2752 	unsigned int min_intrs;
2753 	int err;
2754 
2755 	lc = &ident->lif.eth.config;
2756 	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
2757 	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
2758 	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
2759 	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
2760 	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
2761 
2762 	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
2763 	nxqs = min(nxqs, num_online_cpus());
2764 	neqs = min(neqs_per_lif, num_online_cpus());
2765 
2766 try_again:
2767 	/* interrupt usage:
2768 	 *    1 for master lif adminq/notifyq
2769 	 *    1 for each CPU for master lif TxRx queue pairs
2770 	 *    whatever's left is for RDMA queues
2771 	 */
2772 	nintrs = 1 + nxqs + neqs;
2773 	min_intrs = 2;  /* adminq + 1 TxRx queue pair */
2774 
2775 	if (nintrs > dev_nintrs)
2776 		goto try_fewer;
2777 
2778 	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
2779 	if (err < 0 && err != -ENOSPC) {
2780 		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
2781 		return err;
2782 	}
2783 	if (err == -ENOSPC)
2784 		goto try_fewer;
2785 
2786 	if (err != nintrs) {
2787 		ionic_bus_free_irq_vectors(ionic);
2788 		goto try_fewer;
2789 	}
2790 
2791 	ionic->nnqs_per_lif = nnqs_per_lif;
2792 	ionic->neqs_per_lif = neqs;
2793 	ionic->ntxqs_per_lif = nxqs;
2794 	ionic->nrxqs_per_lif = nxqs;
2795 	ionic->nintrs = nintrs;
2796 
2797 	ionic_debugfs_add_sizes(ionic);
2798 
2799 	return 0;
2800 
2801 try_fewer:
2802 	if (nnqs_per_lif > 1) {
2803 		nnqs_per_lif >>= 1;
2804 		goto try_again;
2805 	}
2806 	if (neqs > 1) {
2807 		neqs >>= 1;
2808 		goto try_again;
2809 	}
2810 	if (nxqs > 1) {
2811 		nxqs >>= 1;
2812 		goto try_again;
2813 	}
2814 	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
2815 	return -ENOSPC;
2816 }
2817