// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
static void ionic_link_status_check(struct ionic_lif *lif);

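/* Process at most one deferred-work item per invocation, then re-arm the
 * work so any remaining items are handled in a fresh scheduling cycle
 * rather than monopolizing the shared system workqueue.
 */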
static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif,
					     deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	spin_lock_bh(&def->lock);
	if (!list_empty(&def->list)) {
		w = list_first_entry(&def->list,
				     struct ionic_deferred_work, list);
		list_del(&w->list);
	}
	spin_unlock_bh(&def->lock);

	if (w) {
		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif, w->rx_mode);
			break;
		case IONIC_DW_TYPE_RX_ADDR_ADD:
			ionic_lif_addr_add(lif, w->addr);
			break;
		case IONIC_DW_TYPE_RX_ADDR_DEL:
			ionic_lif_addr_del(lif, w->addr);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		default:
			break;
		}
		kfree(w);
		schedule_work(&def->work);
	}
}

static void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
				       struct ionic_deferred_work *work)
{
	spin_lock_bh(&def->lock);
	list_add_tail(&work->list, &def->list);
	spin_unlock_bh(&def->lock);
	schedule_work(&def->work);
}

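/* Propagate the firmware-reported link state to the net stack.  Note the
 * ordering: on link-down the carrier is turned off before the queues are
 * stopped, so the netdev watchdog doesn't fire on a dead link.
 */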
static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	/* filter out the no-change cases */
	if (link_up == netif_carrier_ok(netdev))
		goto link_out;

	if (link_up) {
		netdev_info(netdev, "Link up - %d Gbps\n",
			    le32_to_cpu(lif->info->status.link_speed) / 1000);

		if (test_bit(IONIC_LIF_UP, lif->state)) {
			netif_tx_wake_all_queues(lif->netdev);
			netif_carrier_on(netdev);
		}
	} else {
		netdev_info(netdev, "Link down\n");

		/* carrier off first to avoid watchdog timeout */
		netif_carrier_off(netdev);
		if (test_bit(IONIC_LIF_UP, lif->state))
			netif_tx_stop_all_queues(netdev);
	}

link_out:
	clear_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state);
}

static void ionic_link_status_check_request(struct ionic_lif *lif)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (in_interrupt()) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			/* clear the bit so a later event can retry,
			 * else link checks would be dropped forever
			 */
			clear_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state);
			return;
		}

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_link_status_check(lif);
	}
}

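/* The per-queue interrupt handler only kicks NAPI; all completion
 * processing happens later in softirq poll context.
 */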
static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = lif->netdev->name;
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs) {
		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
			    __func__, index, ionic->nintrs);
		return -ENOSPC;
	}

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	return 0;
}

static void ionic_intr_free(struct ionic_lif *lif, int index)
{
	if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
		clear_bit(index, lif->ionic->intrs);
}

static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		napi_enable(&qcq->napi);
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_qcq_disable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_DISABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}

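/* Tell the FW to put the LIF into its disabled state
 * (LIF_SETATTR state = IONIC_LIF_DISABLE); used on the way down,
 * after the queues are disabled and before they are torn down.
 */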
static void ionic_lif_quiesce(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_STATE,
			.index = cpu_to_le16(lif->index),
			.state = IONIC_LIF_DISABLE
		},
	};

	ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
	qcq->base = NULL;
	qcq->base_pa = 0;

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_free(lif, qcq->intr.index);

	devm_kfree(dev, qcq->cq.info);
	qcq->cq.info = NULL;
	devm_kfree(dev, qcq->q.info);
	qcq->q.info = NULL;
	devm_kfree(dev, qcq);
}

static void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int i;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		ionic_qcq_free(lif, lif->adminqcq);
		lif->adminqcq = NULL;
	}

	for (i = 0; i < lif->nxqs; i++)
		if (lif->rxqcqs[i].stats)
			devm_kfree(dev, lif->rxqcqs[i].stats);

	devm_kfree(dev, lif->rxqcqs);
	lif->rxqcqs = NULL;

	for (i = 0; i < lif->nxqs; i++)
		if (lif->txqcqs[i].stats)
			devm_kfree(dev, lif->txqcqs[i].stats);

	devm_kfree(dev, lif->txqcqs);
	lif->txqcqs = NULL;
}

static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
		ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index);
		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
	}

	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
}

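/* Allocate a queue/completion-queue pair.  The descriptor ring, the
 * completion ring, and (optionally) the scatter-gather ring are carved
 * out of a single coherent DMA allocation, each slice page-aligned.
 */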
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int pid, struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	u32 q_size, cq_size, sg_size, total_size;
	struct device *dev = lif->ionic->dev;
	void *q_base, *cq_base, *sg_base;
	dma_addr_t cq_base_pa = 0;
	dma_addr_t sg_base_pa = 0;
	dma_addr_t q_base_pa = 0;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	q_size  = num_descs * desc_size;
	cq_size = num_descs * cq_desc_size;
	sg_size = num_descs * sg_desc_size;

	total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
	/* Note: aligning q_size/cq_size is not enough, since cq_base must
	 * itself be page-aligned and q_base might not start on a page
	 * boundary; pad with an extra PAGE_SIZE to be safe.
	 */
	total_size += PAGE_SIZE;
	if (flags & IONIC_QCQ_F_SG) {
		total_size += ALIGN(sg_size, PAGE_SIZE);
		total_size += PAGE_SIZE;
	}

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->flags = flags;

	new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
				   GFP_KERNEL);
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->q.type = type;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out;
	}

	if (flags & IONIC_QCQ_F_INTR) {
		err = ionic_intr_alloc(lif, &new->intr);
		if (err) {
			netdev_warn(lif->netdev, "no intr for %s: %d\n",
				    name, err);
			goto err_out;
		}

		err = ionic_bus_get_irq(lif->ionic, new->intr.index);
		if (err < 0) {
			netdev_warn(lif->netdev, "no vector for %s: %d\n",
				    name, err);
			goto err_out_free_intr;
		}
		new->intr.vector = err;
		ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
				       IONIC_INTR_MASK_SET);

		new->intr.cpu = new->intr.index % num_online_cpus();
		if (cpu_online(new->intr.cpu))
			cpumask_set_cpu(new->intr.cpu,
					&new->intr.affinity_mask);
	} else {
		new->intr.index = INTR_INDEX_NOT_ASSIGNED;
	}

	new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
				    GFP_KERNEL);
	if (!new->cq.info) {
		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
		err = -ENOMEM;
		goto err_out_free_intr;
	}

	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_intr;
	}

	new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
				       GFP_KERNEL);
	if (!new->base) {
		netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
		err = -ENOMEM;
		goto err_out_free_intr;
	}

	new->total_size = total_size;

	q_base = new->base;
	q_base_pa = new->base_pa;

	cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
	cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);

	if (flags & IONIC_QCQ_F_SG) {
		sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
					PAGE_SIZE);
		sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	ionic_q_map(&new->q, q_base, q_base_pa);
	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
	ionic_cq_bind(&new->cq, &new->q);

	*qcq = new;

	return 0;

err_out_free_intr:
	ionic_intr_free(lif, new->intr.index);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int q_list_size;
	unsigned int flags;
	int err;
	int i;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0, lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0, lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out_free_adminqcq;

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
	err = -ENOMEM;
	lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out_free_notifyqcq;
	for (i = 0; i < lif->nxqs; i++) {
		lif->txqcqs[i].stats = devm_kzalloc(dev,
						    sizeof(struct ionic_q_stats),
						    GFP_KERNEL);
		if (!lif->txqcqs[i].stats)
			goto err_out_free_tx_stats;
	}

	lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out_free_tx_stats;
	for (i = 0; i < lif->nxqs; i++) {
		lif->rxqcqs[i].stats = devm_kzalloc(dev,
						    sizeof(struct ionic_q_stats),
						    GFP_KERNEL);
		if (!lif->rxqcqs[i].stats)
			goto err_out_free_rx_stats;
	}

	return 0;

err_out_free_rx_stats:
	for (i = 0; i < lif->nxqs; i++)
		if (lif->rxqcqs[i].stats)
			devm_kfree(dev, lif->rxqcqs[i].stats);
	devm_kfree(dev, lif->rxqcqs);
	lif->rxqcqs = NULL;
err_out_free_tx_stats:
	for (i = 0; i < lif->nxqs; i++)
		if (lif->txqcqs[i].stats)
			devm_kfree(dev, lif->txqcqs[i].stats);
	devm_kfree(dev, lif->txqcqs);
	lif->txqcqs = NULL;
err_out_free_notifyqcq:
	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}
err_out_free_adminqcq:
	ionic_qcq_free(lif, lif->adminqcq);
	lif->adminqcq = NULL;

	return err;
}

static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
		},
	};
	int err;

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	qcq->flags |= IONIC_QCQ_F_INITED;

	ionic_debugfs_add_qcq(lif, qcq);

	return 0;
}

static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
		},
	};
	int err;

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
		       NAPI_POLL_WEIGHT);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netif_napi_del(&qcq->napi);
		return err;
	}

	qcq->flags |= IONIC_QCQ_F_INITED;

	ionic_debugfs_add_qcq(lif, qcq);

	return 0;
}

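/* Notifyq completions don't use a color bit; each event instead carries a
 * monotonically increasing event id (eid), and anything at or below the
 * last-seen eid has already been handled.
 */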
static bool ionic_notifyq_service(struct ionic_cq *cq,
				  struct ionic_cq_info *cq_info)
{
	union ionic_notifyq_comp *comp = cq_info->cq_desc;
	struct net_device *netdev;
	struct ionic_queue *q;
	struct ionic_lif *lif;
	u64 eid;

	q = cq->bound_q;
	lif = q->info[0].cb_arg;
	netdev = lif->netdev;
	eid = le64_to_cpu(comp->event.eid);

	/* Have we run out of new completions to process? */
	if (eid <= lif->last_eid)
		return false;

	lif->last_eid = eid;

	dev_dbg(lif->ionic->dev, "notifyq event:\n");
	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	switch (le16_to_cpu(comp->event.ecode)) {
	case IONIC_EVENT_LINK_CHANGE:
		ionic_link_status_check_request(lif);
		break;
	case IONIC_EVENT_RESET:
		netdev_info(netdev, "Notifyq IONIC_EVENT_RESET eid=%lld\n",
			    eid);
		netdev_info(netdev, "  reset_code=%d state=%d\n",
			    comp->reset.reset_code,
			    comp->reset.state);
		break;
	default:
		netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n",
			    comp->event.ecode, eid);
		break;
	}

	return true;
}

static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct ionic_cq *cq = &lif->notifyqcq->cq;
	u32 work_done;

	work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
				     NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);

	return work_done;
}

static bool ionic_adminq_service(struct ionic_cq *cq,
				 struct ionic_cq_info *cq_info)
{
	struct ionic_admin_comp *comp = cq_info->cq_desc;

	if (!color_match(comp->color, cq->done_color))
		return false;

	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));

	return true;
}

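/* The adminq NAPI handler also drains the notifyq, since the notifyq
 * rides on the adminq interrupt (see ionic_qcqs_alloc()).
 */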
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	int n_work = 0;
	int a_work = 0;

	if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
		n_work = ionic_notifyq_clean(lif, budget);
	a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);

	return max(n_work, a_work);
}

static void ionic_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}

static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	/* don't bother if we already have it */
	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	spin_unlock_bh(&lif->rx_filters.lock);
	if (f)
		return 0;

	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* log the filter id only after the command completes */
	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
		   le32_to_cpu(ctx.comp.rx_filter_add.filter_id));

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
		   le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));

	return 0;
}

static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
{
	struct ionic *ionic = lif->ionic;
	struct ionic_deferred_work *work;
	unsigned int nmfilters;
	unsigned int nufilters;

	if (add) {
		/* Do we have space for this filter?  We test the counters
		 * here before checking the need for deferral so that we
		 * can return an overflow error to the stack.
		 */
		nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
		nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);

		if (is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)
			lif->nmcast++;
		else if (!is_multicast_ether_addr(addr) &&
			 lif->nucast < nufilters)
			lif->nucast++;
		else
			return -ENOSPC;
	} else {
		if (is_multicast_ether_addr(addr) && lif->nmcast)
			lif->nmcast--;
		else if (!is_multicast_ether_addr(addr) && lif->nucast)
			lif->nucast--;
	}

	if (in_interrupt()) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
			return -ENOMEM;
		}
		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
				   IONIC_DW_TYPE_RX_ADDR_DEL;
		memcpy(work->addr, addr, ETH_ALEN);
		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		if (add)
			return ionic_lif_addr_add(lif, addr);
		else
			return ionic_lif_addr_del(lif, addr);
	}

	return 0;
}

static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, true);
}

static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, false);
}

static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_mode_set = {
			.opcode = IONIC_CMD_RX_MODE_SET,
			.lif_index = cpu_to_le16(lif->index),
			.rx_mode = cpu_to_le16(rx_mode),
		},
	};
	char buf[128];
	int err;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		     lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
			    rx_mode, err);
	else
		lif->rx_mode = rx_mode;
}

static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
{
	struct ionic_deferred_work *work;

	if (in_interrupt()) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
			return;
		}
		work->type = IONIC_DW_TYPE_RX_MODE;
		work->rx_mode = rx_mode;
		netdev_dbg(lif->netdev, "deferred: rx_mode\n");
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_lif_rx_mode(lif, rx_mode);
	}
}

static void ionic_set_rx_mode(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_identity *ident;
	unsigned int nfilters;
	unsigned int rx_mode;

	ident = &lif->ionic->ident;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync the unicast addresses, then check for an overflow state:
	 *    if we've run out of filters, note the overflow and turn on
	 *    NIC PROMISC mode;
	 *    else if a previous overflow has since cleared, drop our
	 *    overflow flag and check the netdev flags to see whether
	 *    NIC PROMISC can be disabled again
	 */
	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
	if (netdev_uc_count(netdev) + 1 > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		lif->uc_overflow = true;
	} else if (lif->uc_overflow) {
		lif->uc_overflow = false;
		if (!(netdev->flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
	}

	/* same for multicast */
	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
	nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
	if (netdev_mc_count(netdev) > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
		lif->mc_overflow = true;
	} else if (lif->mc_overflow) {
		lif->mc_overflow = false;
		if (!(netdev->flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	if (lif->rx_mode != rx_mode)
		_ionic_lif_rx_mode(lif, rx_mode);
}

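/* Translate the stack's netdev_features_t bits into the device's own
 * feature bits for the LIF_SETATTR/FEATURES command.
 */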
static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}

static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	/* compare the requested and returned device feature bits,
	 * not the netdev feature bits
	 */
	if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC does not support vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");

	return 0;
}

static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_RXHASH |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_addr_del(netdev, netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_addr_add(netdev, mac);
}

static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	netdev->mtu = new_mtu;
	err = ionic_reset_queues(lif);

	return err;
}

static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif,
					     tx_timeout_work);

	netdev_info(lif->netdev, "Tx Timeout recovery\n");

	rtnl_lock();
	ionic_reset_queues(lif);
	rtnl_unlock();
}

static void ionic_tx_timeout(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	schedule_work(&lif->tx_timeout_work);
}

static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
			.vlan.vlan = cpu_to_le16(vid),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
		   le32_to_cpu(ctx.comp.rx_filter_add.filter_id));

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_vlan(lif, vid);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);

	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
		   le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));

	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	return ionic_adminq_post_wait(lif, &ctx);
}

int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.types = cpu_to_le16(types),
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	lif->rss_types = types;

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_lif_rss_init(struct ionic_lif *lif)
{
	u8 rss_key[IONIC_RSS_HASH_KEY_SIZE];
	unsigned int tbl_sz;
	unsigned int i;

	netdev_rss_key_fill(rss_key, IONIC_RSS_HASH_KEY_SIZE);

	lif->rss_types = IONIC_RSS_TYPE_IPV4     |
			 IONIC_RSS_TYPE_IPV4_TCP |
			 IONIC_RSS_TYPE_IPV4_UDP |
			 IONIC_RSS_TYPE_IPV6     |
			 IONIC_RSS_TYPE_IPV6_TCP |
			 IONIC_RSS_TYPE_IPV6_UDP;

	/* Fill indirection table with 'default' values */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, rss_key, NULL);
}

static int ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	return ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}

static void ionic_txrx_disable(struct ionic_lif *lif)
{
	unsigned int i;

	for (i = 0; i < lif->nxqs; i++) {
		ionic_qcq_disable(lif->txqcqs[i].qcq);
		ionic_qcq_disable(lif->rxqcqs[i].qcq);
	}
}

static void ionic_txrx_deinit(struct ionic_lif *lif)
{
	unsigned int i;

	for (i = 0; i < lif->nxqs; i++) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
		ionic_tx_flush(&lif->txqcqs[i].qcq->cq);

		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
		ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
		ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
	}
}

static void ionic_txrx_free(struct ionic_lif *lif)
{
	unsigned int i;

	for (i = 0; i < lif->nxqs; i++) {
		ionic_qcq_free(lif, lif->txqcqs[i].qcq);
		lif->txqcqs[i].qcq = NULL;

		ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
		lif->rxqcqs[i].qcq = NULL;
	}
}

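/* Tx queues don't get their own interrupt; each tx qcq is linked to the
 * interrupt of its partner rx qcq, so one vector serves the pair.
 */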
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int flags;
	unsigned int i;
	int err = 0;

	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      lif->ntxq_descs,
				      sizeof(struct ionic_txq_desc),
				      sizeof(struct ionic_txq_comp),
				      sizeof(struct ionic_txq_sg_desc),
				      lif->kern_pid, &lif->txqcqs[i].qcq);
		if (err)
			goto err_out;

		lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
	}

	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
				      lif->nrxq_descs,
				      sizeof(struct ionic_rxq_desc),
				      sizeof(struct ionic_rxq_comp),
				      sizeof(struct ionic_rxq_sg_desc),
				      lif->kern_pid, &lif->rxqcqs[i].qcq);
		if (err)
			goto err_out;

		lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[i].qcq->intr.index,
				     lif->rx_coalesce_hw);
		ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
					  lif->txqcqs[i].qcq);
	}

	return 0;

err_out:
	ionic_txrx_free(lif);

	return err;
}

static int ionic_txrx_init(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
		if (err)
			goto err_out;

		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
		if (err) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
			goto err_out;
		}
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_init(lif);

	ionic_set_rx_mode(lif->netdev);

	return 0;

err_out:
	while (i--) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
	}

	return err;
}

static int ionic_txrx_enable(struct ionic_lif *lif)
{
	int i, err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_enable(lif->txqcqs[i].qcq);
		if (err)
			goto err_out;

		ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
		err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
		if (err) {
			ionic_qcq_disable(lif->txqcqs[i].qcq);
			goto err_out;
		}
	}

	return 0;

err_out:
	while (i--) {
		ionic_qcq_disable(lif->rxqcqs[i].qcq);
		ionic_qcq_disable(lif->txqcqs[i].qcq);
	}

	return err;
}

int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netif_carrier_off(netdev);

	err = ionic_txrx_alloc(lif);
	if (err)
		return err;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_txrx_free;

	err = ionic_txrx_enable(lif);
	if (err)
		goto err_txrx_deinit;

	netif_set_real_num_tx_queues(netdev, lif->nxqs);
	netif_set_real_num_rx_queues(netdev, lif->nxqs);

	set_bit(IONIC_LIF_UP, lif->state);

	ionic_link_status_check_request(lif);
	if (netif_carrier_ok(netdev))
		netif_tx_wake_all_queues(netdev);

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_txrx_free:
	ionic_txrx_free(lif);
	return err;
}

int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err = 0;

	if (!test_bit(IONIC_LIF_UP, lif->state)) {
		dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n",
			__func__, lif->name);
		return 0;
	}
	dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name);
	clear_bit(IONIC_LIF_UP, lif->state);

	/* carrier off before disabling queues to avoid watchdog timeout */
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);

	ionic_txrx_disable(lif);
	ionic_lif_quiesce(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);

	return err;
}

static const struct net_device_ops ionic_netdev_ops = {
	.ndo_open               = ionic_open,
	.ndo_stop               = ionic_stop,
	.ndo_start_xmit		= ionic_start_xmit,
	.ndo_get_stats64	= ionic_get_stats64,
	.ndo_set_rx_mode	= ionic_set_rx_mode,
	.ndo_set_features	= ionic_set_features,
	.ndo_set_mac_address	= ionic_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout         = ionic_tx_timeout,
	.ndo_change_mtu         = ionic_change_mtu,
	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
};

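/* Stop and restart the queues under the QUEUE_RESET bit so concurrent
 * resets (mtu change, tx timeout recovery) don't trip over each other.
 */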
int ionic_reset_queues(struct ionic_lif *lif)
{
	bool running;
	int err = 0;

	/* Put off the next watchdog timeout */
	netif_trans_update(lif->netdev);

	err = ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET);
	if (err)
		return err;

	running = netif_running(lif->netdev);
	if (running)
		err = ionic_stop(lif->netdev);
	if (!err && running)
		ionic_open(lif->netdev);

	clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);

	return err;
}

static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
{
	struct device *dev = ionic->dev;
	struct net_device *netdev;
	struct ionic_lif *lif;
	int tbl_sz;
	int err;

	netdev = alloc_etherdev_mqs(sizeof(*lif),
				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
	if (!netdev) {
		dev_err(dev, "Cannot allocate netdev, aborting\n");
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(netdev, dev);

	lif = netdev_priv(netdev);
	lif->netdev = netdev;
	ionic->master_lif = lif;
	netdev->netdev_ops = &ionic_netdev_ops;
	ionic_ethtool_set_ops(netdev);

	netdev->watchdog_timeo = 2 * HZ;
	netdev->min_mtu = IONIC_MIN_MTU;
	netdev->max_mtu = IONIC_MAX_MTU;

	lif->neqs = ionic->neqs_per_lif;
	lif->nxqs = ionic->ntxqs_per_lif;

	lif->ionic = ionic;
	lif->index = index;
	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;

	/* Convert the default coalesce value to actual hw resolution */
	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
						    lif->rx_coalesce_usecs);

	snprintf(lif->name, sizeof(lif->name), "lif%u", index);

	spin_lock_init(&lif->adminq_lock);

	spin_lock_init(&lif->deferred.lock);
	INIT_LIST_HEAD(&lif->deferred.list);
	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);

	/* allocate lif info */
	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
	lif->info = dma_alloc_coherent(dev, lif->info_sz,
				       &lif->info_pa, GFP_KERNEL);
	if (!lif->info) {
		dev_err(dev, "Failed to allocate lif info, aborting\n");
		err = -ENOMEM;
		goto err_out_free_netdev;
	}

	/* allocate queues */
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out_free_lif_info;

	/* allocate rss indirection table */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
					      &lif->rss_ind_tbl_pa,
					      GFP_KERNEL);

	if (!lif->rss_ind_tbl) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
		goto err_out_free_qcqs;
	}

	list_add_tail(&lif->list, &ionic->lifs);

	return lif;

err_out_free_qcqs:
	ionic_qcqs_free(lif);
err_out_free_lif_info:
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;
err_out_free_netdev:
	free_netdev(lif->netdev);
	lif = NULL;

	return ERR_PTR(err);
}

int ionic_lifs_alloc(struct ionic *ionic)
{
	struct ionic_lif *lif;

	INIT_LIST_HEAD(&ionic->lifs);

	/* only build the first lif, others are for later features */
	set_bit(0, ionic->lifbits);
	lif = ionic_lif_alloc(ionic, 0);

	return PTR_ERR_OR_ZERO(lif);
}

static void ionic_lif_reset(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_reset(idev, lif->index);
	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
}

static void ionic_lif_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	/* free rss indirection table */
	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
			  lif->rss_ind_tbl_pa);
	lif->rss_ind_tbl = NULL;
	lif->rss_ind_tbl_pa = 0;

	/* free queues */
	ionic_qcqs_free(lif);
	ionic_lif_reset(lif);

	/* free lif info */
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;

	/* unmap doorbell page */
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	/* free netdev & lif */
	ionic_debugfs_del_lif(lif);
	list_del(&lif->list);
	free_netdev(lif->netdev);
}

void ionic_lifs_free(struct ionic *ionic)
{
	struct list_head *cur, *tmp;
	struct ionic_lif *lif;

	list_for_each_safe(cur, tmp, &ionic->lifs) {
		lif = list_entry(cur, struct ionic_lif, list);

		ionic_lif_free(lif);
	}
}

static void ionic_lif_deinit(struct ionic_lif *lif)
{
	if (!test_bit(IONIC_LIF_INITED, lif->state))
		return;

	clear_bit(IONIC_LIF_INITED, lif->state);

	ionic_rx_filters_deinit(lif);
	ionic_lif_rss_deinit(lif);

	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
	ionic_lif_qcq_deinit(lif, lif->adminqcq);

	ionic_lif_reset(lif);
}

void ionic_lifs_deinit(struct ionic *ionic)
{
	struct list_head *cur, *tmp;
	struct ionic_lif *lif;

	list_for_each_safe(cur, tmp, &ionic->lifs) {
		lif = list_entry(cur, struct ionic_lif, list);
		ionic_lif_deinit(lif);
	}
}

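/* The adminq can't initialize itself over the adminq, so it is brought up
 * through the dev-cmd register interface; all later queues are initialized
 * with adminq commands.
 */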
static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_q_init_comp comp;
	struct ionic_dev *idev;
	struct ionic_qcq *qcq;
	struct ionic_queue *q;
	int err;

	idev = &lif->ionic->idev;
	qcq = lif->adminqcq;
	q = &qcq->q;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err) {
		netdev_err(lif->netdev, "adminq init failed %d\n", err);
		return err;
	}

	q->hw_type = comp.hw_type;
	q->hw_index = le32_to_cpu(comp.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
		       NAPI_POLL_WEIGHT);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "adminq irq request failed %d\n", err);
		netif_napi_del(&qcq->napi);
		return err;
	}

	napi_enable(&qcq->napi);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);

	qcq->flags |= IONIC_QCQ_F_INITED;

	ionic_debugfs_add_qcq(lif, qcq);

	return 0;
}

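/* Set up the notifyq over the now-working adminq; the notifyq
 * shares the adminq's interrupt (note the adminqcq intr_index in
 * the command below).
 */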
static int ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_qcq *qcq = lif->notifyqcq;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	int err;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_ENA),
			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
		}
	};

	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);

	/* preset the callback info */
	q->info[0].cb_arg = lif;

	qcq->flags |= IONIC_QCQ_F_INITED;

	ionic_debugfs_add_qcq(lif, qcq);

	return 0;
}

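/* Query the device for its station MAC address and make it the
 * netdev's address, swapping out any previously set filter.
 */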
static int ionic_station_set(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	struct sockaddr addr;
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
	addr.sa_family = AF_INET;
	err = eth_prepare_mac_addr_change(netdev, &addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
			   netdev->dev_addr);
		ionic_lif_addr(lif, netdev->dev_addr, false);
	}

	eth_commit_mac_addr_change(netdev, &addr);
	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
		   netdev->dev_addr);
	ionic_lif_addr(lif, netdev->dev_addr, true);

	return 0;
}

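/* Initialize the LIF in the device, map the kernel doorbell page,
 * then bring up the adminq, notifyq, nic features, and rx filters.
 */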
static int ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_lif_init_comp comp;
	int dbpage_num;
	int err;

	ionic_debugfs_add_lif(lif);

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err)
		return err;

	lif->hw_index = le16_to_cpu(comp.hw_index);

	/* now that we have the hw_index we can figure out our doorbell page */
	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
	if (!lif->dbid_count) {
		dev_err(dev, "No doorbell pages, aborting\n");
		return -EINVAL;
	}

	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
	if (!lif->dbid_inuse) {
		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
		return -ENOMEM;
	}

	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
	set_bit(0, lif->dbid_inuse);
	lif->kern_pid = 0;

	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
	if (!lif->kern_dbpage) {
		dev_err(dev, "Cannot map dbpage, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dbid;
	}

	err = ionic_lif_adminq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	if (lif->ionic->nnqs_per_lif) {
		err = ionic_lif_notifyq_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_init_nic_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	err = ionic_rx_filters_init(lif);
	if (err)
		goto err_out_notifyq_deinit;

	err = ionic_station_set(lif);
	if (err)
		goto err_out_notifyq_deinit;

	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;

	set_bit(IONIC_LIF_INITED, lif->state);

	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);

	return 0;

err_out_notifyq_deinit:
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
	ionic_lif_qcq_deinit(lif, lif->adminqcq);
	ionic_lif_reset(lif);
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
err_out_free_dbid:
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	return err;
}

int ionic_lifs_init(struct ionic *ionic)
{
	struct list_head *cur, *tmp;
	struct ionic_lif *lif;
	int err;

	list_for_each_safe(cur, tmp, &ionic->lifs) {
		lif = list_entry(cur, struct ionic_lif, list);
		err = ionic_lif_init(lif);
		if (err)
			return err;
	}

	return 0;
}

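/* stub: no deferred notifier work to do yet */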
static void ionic_lif_notify_work(struct work_struct *ws)
{
}

static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
		sizeof(ctx.cmd.lif_setattr.name));

	ionic_adminq_post_wait(lif, &ctx);
}

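/* Return the lif only if this netdev is one of ours, identified by
 * its ndo_start_xmit handler.
 */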
static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
{
	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
		return NULL;

	return netdev_priv(netdev);
}

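/* netdevice notifier: on a name change, pass the netdev's new name
 * down to the device.
 */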
static int ionic_lif_notify(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(info);
	struct ionic *ionic = container_of(nb, struct ionic, nb);
	struct ionic_lif *lif = ionic_netdev_lif(ndev);

	if (!lif || lif->ionic != ionic)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGENAME:
		ionic_lif_set_netdev_info(lif);
		break;
	}

	return NOTIFY_DONE;
}

int ionic_lifs_register(struct ionic *ionic)
{
	int err;

	INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);

	ionic->nb.notifier_call = ionic_lif_notify;

	err = register_netdevice_notifier(&ionic->nb);
	if (err)
		ionic->nb.notifier_call = NULL;

	/* only register LIF0 for now */
	err = register_netdev(ionic->master_lif->netdev);
	if (err) {
		dev_err(ionic->dev, "Cannot register net device, aborting\n");
		return err;
	}

	ionic_link_status_check_request(ionic->master_lif);
	ionic->master_lif->registered = true;

	return 0;
}

void ionic_lifs_unregister(struct ionic *ionic)
{
	if (ionic->nb.notifier_call) {
		unregister_netdevice_notifier(&ionic->nb);
		cancel_work_sync(&ionic->nb_work);
		ionic->nb.notifier_call = NULL;
	}

	/* There is only one lif ever registered in the current model,
	 * so don't bother searching the ionic->lifs list for candidates
	 * to unregister.
	 */
	cancel_work_sync(&ionic->master_lif->deferred.work);
	cancel_work_sync(&ionic->master_lif->tx_timeout_work);
	if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(ionic->master_lif->netdev);
}

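/* Fetch the LIF identity data from the firmware into @lid, then log
 * the interesting fields.
 */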
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
		       union ionic_lif_identity *lid)
{
	struct ionic_dev *idev = &ionic->idev;
	size_t sz;
	int err;

	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err)
		return err;

	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
		le64_to_cpu(lid->capabilities));

	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
		le32_to_cpu(lid->eth.max_ucast_filters));
	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
		le32_to_cpu(lid->eth.max_mcast_filters));
	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
		le64_to_cpu(lid->eth.config.features));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
		le32_to_cpu(lid->eth.config.mtu));

	return 0;
}

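/* Budget queues and interrupt vectors against what the device
 * advertises and what the OS will allocate, halving the asks until
 * the vector request succeeds.
 */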
int ionic_lifs_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 *    1 for master lif adminq/notifyq
	 *    1 for each CPU for master lif TxRx queue pairs
	 *    whatever's left is for RDMA queues
	 */
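	/* Illustration: with nxqs = neqs = 8 the first request is
	 * 1 + 8 + 8 = 17 vectors.  If the OS can spare only 8, the
	 * retries below whittle neqs down to 1 and then nxqs to 4,
	 * settling on 1 + 4 + 1 = 6 (nnqs_per_lif is also halved
	 * along the way, but doesn't change the vector count).
	 */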
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2;  /* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	if (err == -ENOSPC)
		goto try_fewer;

	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

try_fewer:
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}