1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3 
4 #include <linux/printk.h>
5 #include <linux/dynamic_debug.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/rtnetlink.h>
9 #include <linux/interrupt.h>
10 #include <linux/pci.h>
11 #include <linux/cpumask.h>
12 
13 #include "ionic.h"
14 #include "ionic_bus.h"
15 #include "ionic_lif.h"
16 #include "ionic_txrx.h"
17 #include "ionic_ethtool.h"
18 #include "ionic_debugfs.h"
19 
20 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
21 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
22 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
23 static void ionic_link_status_check(struct ionic_lif *lif);
24 static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
25 static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
26 static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
27 
28 static int ionic_start_queues(struct ionic_lif *lif);
29 static void ionic_stop_queues(struct ionic_lif *lif);
30 
31 static void ionic_lif_deferred_work(struct work_struct *work)
32 {
33 	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
34 	struct ionic_deferred *def = &lif->deferred;
35 	struct ionic_deferred_work *w = NULL;
36 
37 	spin_lock_bh(&def->lock);
38 	if (!list_empty(&def->list)) {
39 		w = list_first_entry(&def->list,
40 				     struct ionic_deferred_work, list);
41 		list_del(&w->list);
42 	}
43 	spin_unlock_bh(&def->lock);
44 
45 	if (w) {
46 		switch (w->type) {
47 		case IONIC_DW_TYPE_RX_MODE:
48 			ionic_lif_rx_mode(lif, w->rx_mode);
49 			break;
50 		case IONIC_DW_TYPE_RX_ADDR_ADD:
51 			ionic_lif_addr_add(lif, w->addr);
52 			break;
53 		case IONIC_DW_TYPE_RX_ADDR_DEL:
54 			ionic_lif_addr_del(lif, w->addr);
55 			break;
56 		case IONIC_DW_TYPE_LINK_STATUS:
57 			ionic_link_status_check(lif);
58 			break;
59 		case IONIC_DW_TYPE_LIF_RESET:
60 			if (w->fw_status)
61 				ionic_lif_handle_fw_up(lif);
62 			else
63 				ionic_lif_handle_fw_down(lif);
64 			break;
65 		default:
66 			break;
67 		}
68 		kfree(w);
69 		schedule_work(&def->work);
70 	}
71 }
72 
73 void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
74 				struct ionic_deferred_work *work)
75 {
76 	spin_lock_bh(&def->lock);
77 	list_add_tail(&work->list, &def->list);
78 	spin_unlock_bh(&def->lock);
79 	schedule_work(&def->work);
80 }
81 
82 static void ionic_link_status_check(struct ionic_lif *lif)
83 {
84 	struct net_device *netdev = lif->netdev;
85 	u16 link_status;
86 	bool link_up;
87 
88 	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
89 		return;
90 
91 	if (lif->ionic->is_mgmt_nic)
92 		return;
93 
94 	link_status = le16_to_cpu(lif->info->status.link_status);
95 	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
96 
97 	if (link_up) {
98 		if (!netif_carrier_ok(netdev)) {
99 			u32 link_speed;
100 
101 			ionic_port_identify(lif->ionic);
102 			link_speed = le32_to_cpu(lif->info->status.link_speed);
103 			netdev_info(netdev, "Link up - %d Gbps\n",
104 				    link_speed / 1000);
105 			netif_carrier_on(netdev);
106 		}
107 
108 		if (netif_running(lif->netdev))
109 			ionic_start_queues(lif);
110 	} else {
111 		if (netif_carrier_ok(netdev)) {
112 			netdev_info(netdev, "Link down\n");
113 			netif_carrier_off(netdev);
114 		}
115 
116 		if (netif_running(lif->netdev))
117 			ionic_stop_queues(lif);
118 	}
119 
120 	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
121 }
122 
123 void ionic_link_status_check_request(struct ionic_lif *lif)
124 {
125 	struct ionic_deferred_work *work;
126 
127 	/* we only need one request outstanding at a time */
128 	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
129 		return;
130 
131 	if (in_interrupt()) {
132 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
133 		if (!work)
134 			return;
135 
136 		work->type = IONIC_DW_TYPE_LINK_STATUS;
137 		ionic_lif_deferred_enqueue(&lif->deferred, work);
138 	} else {
139 		ionic_link_status_check(lif);
140 	}
141 }
142 
143 static irqreturn_t ionic_isr(int irq, void *data)
144 {
145 	struct napi_struct *napi = data;
146 
147 	napi_schedule_irqoff(napi);
148 
149 	return IRQ_HANDLED;
150 }
151 
152 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
153 {
154 	struct ionic_intr_info *intr = &qcq->intr;
155 	struct device *dev = lif->ionic->dev;
156 	struct ionic_queue *q = &qcq->q;
157 	const char *name;
158 
159 	if (lif->registered)
160 		name = lif->netdev->name;
161 	else
162 		name = dev_name(dev);
163 
164 	snprintf(intr->name, sizeof(intr->name),
165 		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
166 
167 	return devm_request_irq(dev, intr->vector, ionic_isr,
168 				0, intr->name, &qcq->napi);
169 }
170 
171 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
172 {
173 	struct ionic *ionic = lif->ionic;
174 	int index;
175 
176 	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
177 	if (index == ionic->nintrs) {
178 		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
179 			    __func__, index, ionic->nintrs);
180 		return -ENOSPC;
181 	}
182 
183 	set_bit(index, ionic->intrs);
184 	ionic_intr_init(&ionic->idev, intr, index);
185 
186 	return 0;
187 }
188 
189 static void ionic_intr_free(struct ionic_lif *lif, int index)
190 {
191 	if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
192 		clear_bit(index, lif->ionic->intrs);
193 }
194 
195 static int ionic_qcq_enable(struct ionic_qcq *qcq)
196 {
197 	struct ionic_queue *q = &qcq->q;
198 	struct ionic_lif *lif = q->lif;
199 	struct ionic_dev *idev;
200 	struct device *dev;
201 
202 	struct ionic_admin_ctx ctx = {
203 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
204 		.cmd.q_control = {
205 			.opcode = IONIC_CMD_Q_CONTROL,
206 			.lif_index = cpu_to_le16(lif->index),
207 			.type = q->type,
208 			.index = cpu_to_le32(q->index),
209 			.oper = IONIC_Q_ENABLE,
210 		},
211 	};
212 
213 	idev = &lif->ionic->idev;
214 	dev = lif->ionic->dev;
215 
216 	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
217 		ctx.cmd.q_control.index, ctx.cmd.q_control.type);
218 
219 	if (qcq->flags & IONIC_QCQ_F_INTR) {
220 		irq_set_affinity_hint(qcq->intr.vector,
221 				      &qcq->intr.affinity_mask);
222 		napi_enable(&qcq->napi);
223 		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
224 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
225 				IONIC_INTR_MASK_CLEAR);
226 	}
227 
228 	return ionic_adminq_post_wait(lif, &ctx);
229 }
230 
231 static int ionic_qcq_disable(struct ionic_qcq *qcq)
232 {
233 	struct ionic_queue *q = &qcq->q;
234 	struct ionic_lif *lif = q->lif;
235 	struct ionic_dev *idev;
236 	struct device *dev;
237 
238 	struct ionic_admin_ctx ctx = {
239 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
240 		.cmd.q_control = {
241 			.opcode = IONIC_CMD_Q_CONTROL,
242 			.lif_index = cpu_to_le16(lif->index),
243 			.type = q->type,
244 			.index = cpu_to_le32(q->index),
245 			.oper = IONIC_Q_DISABLE,
246 		},
247 	};
248 
249 	idev = &lif->ionic->idev;
250 	dev = lif->ionic->dev;
251 
252 	dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
253 		ctx.cmd.q_control.index, ctx.cmd.q_control.type);
254 
255 	if (qcq->flags & IONIC_QCQ_F_INTR) {
256 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
257 				IONIC_INTR_MASK_SET);
258 		synchronize_irq(qcq->intr.vector);
259 		irq_set_affinity_hint(qcq->intr.vector, NULL);
260 		napi_disable(&qcq->napi);
261 	}
262 
263 	return ionic_adminq_post_wait(lif, &ctx);
264 }
265 
266 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
267 {
268 	struct ionic_dev *idev = &lif->ionic->idev;
269 
270 	if (!qcq)
271 		return;
272 
273 	if (!(qcq->flags & IONIC_QCQ_F_INITED))
274 		return;
275 
276 	if (qcq->flags & IONIC_QCQ_F_INTR) {
277 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
278 				IONIC_INTR_MASK_SET);
279 		netif_napi_del(&qcq->napi);
280 	}
281 
282 	qcq->flags &= ~IONIC_QCQ_F_INITED;
283 }
284 
285 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
286 {
287 	struct device *dev = lif->ionic->dev;
288 
289 	if (!qcq)
290 		return;
291 
292 	ionic_debugfs_del_qcq(qcq);
293 
294 	dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
295 	qcq->base = NULL;
296 	qcq->base_pa = 0;
297 
298 	if (qcq->flags & IONIC_QCQ_F_INTR) {
299 		irq_set_affinity_hint(qcq->intr.vector, NULL);
300 		devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
301 		qcq->intr.vector = 0;
302 		ionic_intr_free(lif, qcq->intr.index);
303 	}
304 
305 	devm_kfree(dev, qcq->cq.info);
306 	qcq->cq.info = NULL;
307 	devm_kfree(dev, qcq->q.info);
308 	qcq->q.info = NULL;
309 	devm_kfree(dev, qcq);
310 }
311 
312 static void ionic_qcqs_free(struct ionic_lif *lif)
313 {
314 	struct device *dev = lif->ionic->dev;
315 	unsigned int i;
316 
317 	if (lif->notifyqcq) {
318 		ionic_qcq_free(lif, lif->notifyqcq);
319 		lif->notifyqcq = NULL;
320 	}
321 
322 	if (lif->adminqcq) {
323 		ionic_qcq_free(lif, lif->adminqcq);
324 		lif->adminqcq = NULL;
325 	}
326 
327 	if (lif->rxqcqs) {
328 		for (i = 0; i < lif->nxqs; i++)
329 			if (lif->rxqcqs[i].stats)
330 				devm_kfree(dev, lif->rxqcqs[i].stats);
331 		devm_kfree(dev, lif->rxqcqs);
332 		lif->rxqcqs = NULL;
333 	}
334 
335 	if (lif->txqcqs) {
336 		for (i = 0; i < lif->nxqs; i++)
337 			if (lif->txqcqs[i].stats)
338 				devm_kfree(dev, lif->txqcqs[i].stats);
339 		devm_kfree(dev, lif->txqcqs);
340 		lif->txqcqs = NULL;
341 	}
342 }
343 
344 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
345 				      struct ionic_qcq *n_qcq)
346 {
347 	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
348 		ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index);
349 		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
350 	}
351 
352 	n_qcq->intr.vector = src_qcq->intr.vector;
353 	n_qcq->intr.index = src_qcq->intr.index;
354 }
355 
356 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
357 			   unsigned int index,
358 			   const char *name, unsigned int flags,
359 			   unsigned int num_descs, unsigned int desc_size,
360 			   unsigned int cq_desc_size,
361 			   unsigned int sg_desc_size,
362 			   unsigned int pid, struct ionic_qcq **qcq)
363 {
364 	struct ionic_dev *idev = &lif->ionic->idev;
365 	u32 q_size, cq_size, sg_size, total_size;
366 	struct device *dev = lif->ionic->dev;
367 	void *q_base, *cq_base, *sg_base;
368 	dma_addr_t cq_base_pa = 0;
369 	dma_addr_t sg_base_pa = 0;
370 	dma_addr_t q_base_pa = 0;
371 	struct ionic_qcq *new;
372 	int err;
373 
374 	*qcq = NULL;
375 
376 	q_size  = num_descs * desc_size;
377 	cq_size = num_descs * cq_desc_size;
378 	sg_size = num_descs * sg_desc_size;
379 
380 	total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
381 	/* Note: aligning q_size/cq_size is not enough due to cq_base
382 	 * address aligning as q_base could be not aligned to the page.
383 	 * Adding PAGE_SIZE.
384 	 */
385 	total_size += PAGE_SIZE;
386 	if (flags & IONIC_QCQ_F_SG) {
387 		total_size += ALIGN(sg_size, PAGE_SIZE);
388 		total_size += PAGE_SIZE;
389 	}
390 
391 	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
392 	if (!new) {
393 		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
394 		err = -ENOMEM;
395 		goto err_out;
396 	}
397 
398 	new->flags = flags;
399 
400 	new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
401 				   GFP_KERNEL);
402 	if (!new->q.info) {
403 		netdev_err(lif->netdev, "Cannot allocate queue info\n");
404 		err = -ENOMEM;
405 		goto err_out;
406 	}
407 
408 	new->q.type = type;
409 
410 	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
411 			   desc_size, sg_desc_size, pid);
412 	if (err) {
413 		netdev_err(lif->netdev, "Cannot initialize queue\n");
414 		goto err_out;
415 	}
416 
417 	if (flags & IONIC_QCQ_F_INTR) {
418 		err = ionic_intr_alloc(lif, &new->intr);
419 		if (err) {
420 			netdev_warn(lif->netdev, "no intr for %s: %d\n",
421 				    name, err);
422 			goto err_out;
423 		}
424 
425 		err = ionic_bus_get_irq(lif->ionic, new->intr.index);
426 		if (err < 0) {
427 			netdev_warn(lif->netdev, "no vector for %s: %d\n",
428 				    name, err);
429 			goto err_out_free_intr;
430 		}
431 		new->intr.vector = err;
432 		ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
433 				       IONIC_INTR_MASK_SET);
434 
435 		err = ionic_request_irq(lif, new);
436 		if (err) {
437 			netdev_warn(lif->netdev, "irq request failed %d\n", err);
438 			goto err_out_free_intr;
439 		}
440 
441 		new->intr.cpu = cpumask_local_spread(new->intr.index,
442 						     dev_to_node(dev));
443 		if (new->intr.cpu != -1)
444 			cpumask_set_cpu(new->intr.cpu,
445 					&new->intr.affinity_mask);
446 	} else {
447 		new->intr.index = INTR_INDEX_NOT_ASSIGNED;
448 	}
449 
450 	new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
451 				    GFP_KERNEL);
452 	if (!new->cq.info) {
453 		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
454 		err = -ENOMEM;
455 		goto err_out_free_irq;
456 	}
457 
458 	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
459 	if (err) {
460 		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
461 		goto err_out_free_irq;
462 	}
463 
464 	new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
465 				       GFP_KERNEL);
466 	if (!new->base) {
467 		netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
468 		err = -ENOMEM;
469 		goto err_out_free_irq;
470 	}
471 
472 	new->total_size = total_size;
473 
474 	q_base = new->base;
475 	q_base_pa = new->base_pa;
476 
477 	cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
478 	cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);
479 
480 	if (flags & IONIC_QCQ_F_SG) {
481 		sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
482 					PAGE_SIZE);
483 		sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
484 		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
485 	}
486 
487 	ionic_q_map(&new->q, q_base, q_base_pa);
488 	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
489 	ionic_cq_bind(&new->cq, &new->q);
490 
491 	*qcq = new;
492 
493 	return 0;
494 
495 err_out_free_irq:
496 	if (flags & IONIC_QCQ_F_INTR)
497 		devm_free_irq(dev, new->intr.vector, &new->napi);
498 err_out_free_intr:
499 	if (flags & IONIC_QCQ_F_INTR)
500 		ionic_intr_free(lif, new->intr.index);
501 err_out:
502 	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
503 	return err;
504 }
505 
506 static int ionic_qcqs_alloc(struct ionic_lif *lif)
507 {
508 	struct device *dev = lif->ionic->dev;
509 	unsigned int q_list_size;
510 	unsigned int flags;
511 	int err;
512 	int i;
513 
514 	flags = IONIC_QCQ_F_INTR;
515 	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
516 			      IONIC_ADMINQ_LENGTH,
517 			      sizeof(struct ionic_admin_cmd),
518 			      sizeof(struct ionic_admin_comp),
519 			      0, lif->kern_pid, &lif->adminqcq);
520 	if (err)
521 		return err;
522 	ionic_debugfs_add_qcq(lif, lif->adminqcq);
523 
524 	if (lif->ionic->nnqs_per_lif) {
525 		flags = IONIC_QCQ_F_NOTIFYQ;
526 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
527 				      flags, IONIC_NOTIFYQ_LENGTH,
528 				      sizeof(struct ionic_notifyq_cmd),
529 				      sizeof(union ionic_notifyq_comp),
530 				      0, lif->kern_pid, &lif->notifyqcq);
531 		if (err)
532 			goto err_out_free_adminqcq;
533 		ionic_debugfs_add_qcq(lif, lif->notifyqcq);
534 
535 		/* Let the notifyq ride on the adminq interrupt */
536 		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
537 	}
538 
539 	q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
540 	err = -ENOMEM;
541 	lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
542 	if (!lif->txqcqs)
543 		goto err_out_free_notifyqcq;
544 	for (i = 0; i < lif->nxqs; i++) {
545 		lif->txqcqs[i].stats = devm_kzalloc(dev,
546 						    sizeof(struct ionic_q_stats),
547 						    GFP_KERNEL);
548 		if (!lif->txqcqs[i].stats)
549 			goto err_out_free_tx_stats;
550 	}
551 
552 	lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
553 	if (!lif->rxqcqs)
554 		goto err_out_free_tx_stats;
555 	for (i = 0; i < lif->nxqs; i++) {
556 		lif->rxqcqs[i].stats = devm_kzalloc(dev,
557 						    sizeof(struct ionic_q_stats),
558 						    GFP_KERNEL);
559 		if (!lif->rxqcqs[i].stats)
560 			goto err_out_free_rx_stats;
561 	}
562 
563 	return 0;
564 
565 err_out_free_rx_stats:
566 	for (i = 0; i < lif->nxqs; i++)
567 		if (lif->rxqcqs[i].stats)
568 			devm_kfree(dev, lif->rxqcqs[i].stats);
569 	devm_kfree(dev, lif->rxqcqs);
570 	lif->rxqcqs = NULL;
571 err_out_free_tx_stats:
572 	for (i = 0; i < lif->nxqs; i++)
573 		if (lif->txqcqs[i].stats)
574 			devm_kfree(dev, lif->txqcqs[i].stats);
575 	devm_kfree(dev, lif->txqcqs);
576 	lif->txqcqs = NULL;
577 err_out_free_notifyqcq:
578 	if (lif->notifyqcq) {
579 		ionic_qcq_free(lif, lif->notifyqcq);
580 		lif->notifyqcq = NULL;
581 	}
582 err_out_free_adminqcq:
583 	ionic_qcq_free(lif, lif->adminqcq);
584 	lif->adminqcq = NULL;
585 
586 	return err;
587 }
588 
589 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
590 {
591 	struct device *dev = lif->ionic->dev;
592 	struct ionic_queue *q = &qcq->q;
593 	struct ionic_cq *cq = &qcq->cq;
594 	struct ionic_admin_ctx ctx = {
595 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
596 		.cmd.q_init = {
597 			.opcode = IONIC_CMD_Q_INIT,
598 			.lif_index = cpu_to_le16(lif->index),
599 			.type = q->type,
600 			.index = cpu_to_le32(q->index),
601 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
602 					     IONIC_QINIT_F_SG),
603 			.intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
604 			.pid = cpu_to_le16(q->pid),
605 			.ring_size = ilog2(q->num_descs),
606 			.ring_base = cpu_to_le64(q->base_pa),
607 			.cq_ring_base = cpu_to_le64(cq->base_pa),
608 			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
609 		},
610 	};
611 	int err;
612 
613 	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
614 	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
615 	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
616 	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
617 
618 	q->tail = q->info;
619 	q->head = q->tail;
620 	cq->tail = cq->info;
621 
622 	err = ionic_adminq_post_wait(lif, &ctx);
623 	if (err)
624 		return err;
625 
626 	q->hw_type = ctx.comp.q_init.hw_type;
627 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
628 	q->dbval = IONIC_DBELL_QID(q->hw_index);
629 
630 	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
631 	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
632 
633 	qcq->flags |= IONIC_QCQ_F_INITED;
634 
635 	return 0;
636 }
637 
638 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
639 {
640 	struct device *dev = lif->ionic->dev;
641 	struct ionic_queue *q = &qcq->q;
642 	struct ionic_cq *cq = &qcq->cq;
643 	struct ionic_admin_ctx ctx = {
644 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
645 		.cmd.q_init = {
646 			.opcode = IONIC_CMD_Q_INIT,
647 			.lif_index = cpu_to_le16(lif->index),
648 			.type = q->type,
649 			.index = cpu_to_le32(q->index),
650 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
651 					     IONIC_QINIT_F_SG),
652 			.intr_index = cpu_to_le16(cq->bound_intr->index),
653 			.pid = cpu_to_le16(q->pid),
654 			.ring_size = ilog2(q->num_descs),
655 			.ring_base = cpu_to_le64(q->base_pa),
656 			.cq_ring_base = cpu_to_le64(cq->base_pa),
657 			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
658 		},
659 	};
660 	int err;
661 
662 	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
663 	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
664 	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
665 	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
666 
667 	q->tail = q->info;
668 	q->head = q->tail;
669 	cq->tail = cq->info;
670 
671 	err = ionic_adminq_post_wait(lif, &ctx);
672 	if (err)
673 		return err;
674 
675 	q->hw_type = ctx.comp.q_init.hw_type;
676 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
677 	q->dbval = IONIC_DBELL_QID(q->hw_index);
678 
679 	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
680 	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
681 
682 	netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
683 		       NAPI_POLL_WEIGHT);
684 
685 	qcq->flags |= IONIC_QCQ_F_INITED;
686 
687 	return 0;
688 }
689 
690 static bool ionic_notifyq_service(struct ionic_cq *cq,
691 				  struct ionic_cq_info *cq_info)
692 {
693 	union ionic_notifyq_comp *comp = cq_info->cq_desc;
694 	struct ionic_deferred_work *work;
695 	struct net_device *netdev;
696 	struct ionic_queue *q;
697 	struct ionic_lif *lif;
698 	u64 eid;
699 
700 	q = cq->bound_q;
701 	lif = q->info[0].cb_arg;
702 	netdev = lif->netdev;
703 	eid = le64_to_cpu(comp->event.eid);
704 
705 	/* Have we run out of new completions to process? */
706 	if (eid <= lif->last_eid)
707 		return false;
708 
709 	lif->last_eid = eid;
710 
711 	dev_dbg(lif->ionic->dev, "notifyq event:\n");
712 	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
713 			 comp, sizeof(*comp), true);
714 
715 	switch (le16_to_cpu(comp->event.ecode)) {
716 	case IONIC_EVENT_LINK_CHANGE:
717 		ionic_link_status_check_request(lif);
718 		break;
719 	case IONIC_EVENT_RESET:
720 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
721 		if (!work) {
722 			netdev_err(lif->netdev, "%s OOM\n", __func__);
723 		} else {
724 			work->type = IONIC_DW_TYPE_LIF_RESET;
725 			ionic_lif_deferred_enqueue(&lif->deferred, work);
726 		}
727 		break;
728 	default:
729 		netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n",
730 			    comp->event.ecode, eid);
731 		break;
732 	}
733 
734 	return true;
735 }
736 
737 static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
738 {
739 	struct ionic_dev *idev = &lif->ionic->idev;
740 	struct ionic_cq *cq = &lif->notifyqcq->cq;
741 	u32 work_done;
742 
743 	work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
744 				     NULL, NULL);
745 	if (work_done)
746 		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
747 				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
748 
749 	return work_done;
750 }
751 
752 static bool ionic_adminq_service(struct ionic_cq *cq,
753 				 struct ionic_cq_info *cq_info)
754 {
755 	struct ionic_admin_comp *comp = cq_info->cq_desc;
756 
757 	if (!color_match(comp->color, cq->done_color))
758 		return false;
759 
760 	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
761 
762 	return true;
763 }
764 
765 static int ionic_adminq_napi(struct napi_struct *napi, int budget)
766 {
767 	struct ionic_lif *lif = napi_to_cq(napi)->lif;
768 	int n_work = 0;
769 	int a_work = 0;
770 
771 	if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
772 		n_work = ionic_notifyq_clean(lif, budget);
773 	a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);
774 
775 	return max(n_work, a_work);
776 }
777 
778 static void ionic_get_stats64(struct net_device *netdev,
779 			      struct rtnl_link_stats64 *ns)
780 {
781 	struct ionic_lif *lif = netdev_priv(netdev);
782 	struct ionic_lif_stats *ls;
783 
784 	memset(ns, 0, sizeof(*ns));
785 	ls = &lif->info->stats;
786 
787 	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
788 			 le64_to_cpu(ls->rx_mcast_packets) +
789 			 le64_to_cpu(ls->rx_bcast_packets);
790 
791 	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
792 			 le64_to_cpu(ls->tx_mcast_packets) +
793 			 le64_to_cpu(ls->tx_bcast_packets);
794 
795 	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
796 		       le64_to_cpu(ls->rx_mcast_bytes) +
797 		       le64_to_cpu(ls->rx_bcast_bytes);
798 
799 	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
800 		       le64_to_cpu(ls->tx_mcast_bytes) +
801 		       le64_to_cpu(ls->tx_bcast_bytes);
802 
803 	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
804 			 le64_to_cpu(ls->rx_mcast_drop_packets) +
805 			 le64_to_cpu(ls->rx_bcast_drop_packets);
806 
807 	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
808 			 le64_to_cpu(ls->tx_mcast_drop_packets) +
809 			 le64_to_cpu(ls->tx_bcast_drop_packets);
810 
811 	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
812 
813 	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
814 
815 	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
816 			       le64_to_cpu(ls->rx_queue_disabled) +
817 			       le64_to_cpu(ls->rx_desc_fetch_error) +
818 			       le64_to_cpu(ls->rx_desc_data_error);
819 
820 	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
821 				le64_to_cpu(ls->tx_queue_disabled) +
822 				le64_to_cpu(ls->tx_desc_fetch_error) +
823 				le64_to_cpu(ls->tx_desc_data_error);
824 
825 	ns->rx_errors = ns->rx_over_errors +
826 			ns->rx_missed_errors;
827 
828 	ns->tx_errors = ns->tx_aborted_errors;
829 }
830 
831 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
832 {
833 	struct ionic_admin_ctx ctx = {
834 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
835 		.cmd.rx_filter_add = {
836 			.opcode = IONIC_CMD_RX_FILTER_ADD,
837 			.lif_index = cpu_to_le16(lif->index),
838 			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
839 		},
840 	};
841 	struct ionic_rx_filter *f;
842 	int err;
843 
844 	/* don't bother if we already have it */
845 	spin_lock_bh(&lif->rx_filters.lock);
846 	f = ionic_rx_filter_by_addr(lif, addr);
847 	spin_unlock_bh(&lif->rx_filters.lock);
848 	if (f)
849 		return 0;
850 
851 	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
852 		   ctx.comp.rx_filter_add.filter_id);
853 
854 	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
855 	err = ionic_adminq_post_wait(lif, &ctx);
856 	if (err && err != -EEXIST)
857 		return err;
858 
859 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
860 }
861 
862 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
863 {
864 	struct ionic_admin_ctx ctx = {
865 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
866 		.cmd.rx_filter_del = {
867 			.opcode = IONIC_CMD_RX_FILTER_DEL,
868 			.lif_index = cpu_to_le16(lif->index),
869 		},
870 	};
871 	struct ionic_rx_filter *f;
872 	int err;
873 
874 	spin_lock_bh(&lif->rx_filters.lock);
875 	f = ionic_rx_filter_by_addr(lif, addr);
876 	if (!f) {
877 		spin_unlock_bh(&lif->rx_filters.lock);
878 		return -ENOENT;
879 	}
880 
881 	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
882 	ionic_rx_filter_free(lif, f);
883 	spin_unlock_bh(&lif->rx_filters.lock);
884 
885 	err = ionic_adminq_post_wait(lif, &ctx);
886 	if (err && err != -EEXIST)
887 		return err;
888 
889 	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
890 		   ctx.cmd.rx_filter_del.filter_id);
891 
892 	return 0;
893 }
894 
895 static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
896 {
897 	struct ionic *ionic = lif->ionic;
898 	struct ionic_deferred_work *work;
899 	unsigned int nmfilters;
900 	unsigned int nufilters;
901 
902 	if (add) {
903 		/* Do we have space for this filter?  We test the counters
904 		 * here before checking the need for deferral so that we
905 		 * can return an overflow error to the stack.
906 		 */
907 		nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
908 		nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);
909 
910 		if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
911 			lif->nmcast++;
912 		else if (!is_multicast_ether_addr(addr) &&
913 			 lif->nucast < nufilters)
914 			lif->nucast++;
915 		else
916 			return -ENOSPC;
917 	} else {
918 		if (is_multicast_ether_addr(addr) && lif->nmcast)
919 			lif->nmcast--;
920 		else if (!is_multicast_ether_addr(addr) && lif->nucast)
921 			lif->nucast--;
922 	}
923 
924 	if (in_interrupt()) {
925 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
926 		if (!work) {
927 			netdev_err(lif->netdev, "%s OOM\n", __func__);
928 			return -ENOMEM;
929 		}
930 		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
931 				   IONIC_DW_TYPE_RX_ADDR_DEL;
932 		memcpy(work->addr, addr, ETH_ALEN);
933 		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
934 			   add ? "add" : "del", addr);
935 		ionic_lif_deferred_enqueue(&lif->deferred, work);
936 	} else {
937 		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
938 			   add ? "add" : "del", addr);
939 		if (add)
940 			return ionic_lif_addr_add(lif, addr);
941 		else
942 			return ionic_lif_addr_del(lif, addr);
943 	}
944 
945 	return 0;
946 }
947 
948 static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
949 {
950 	return ionic_lif_addr(netdev_priv(netdev), addr, true);
951 }
952 
953 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
954 {
955 	return ionic_lif_addr(netdev_priv(netdev), addr, false);
956 }
957 
958 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
959 {
960 	struct ionic_admin_ctx ctx = {
961 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
962 		.cmd.rx_mode_set = {
963 			.opcode = IONIC_CMD_RX_MODE_SET,
964 			.lif_index = cpu_to_le16(lif->index),
965 			.rx_mode = cpu_to_le16(rx_mode),
966 		},
967 	};
968 	char buf[128];
969 	int err;
970 	int i;
971 #define REMAIN(__x) (sizeof(buf) - (__x))
972 
973 	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
974 		      lif->rx_mode, rx_mode);
975 	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
976 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
977 	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
978 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
979 	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
980 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
981 	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
982 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
983 	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
984 		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
985 	netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
986 
987 	err = ionic_adminq_post_wait(lif, &ctx);
988 	if (err)
989 		netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
990 			    rx_mode, err);
991 	else
992 		lif->rx_mode = rx_mode;
993 }
994 
995 static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
996 {
997 	struct ionic_deferred_work *work;
998 
999 	if (in_interrupt()) {
1000 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
1001 		if (!work) {
1002 			netdev_err(lif->netdev, "%s OOM\n", __func__);
1003 			return;
1004 		}
1005 		work->type = IONIC_DW_TYPE_RX_MODE;
1006 		work->rx_mode = rx_mode;
1007 		netdev_dbg(lif->netdev, "deferred: rx_mode\n");
1008 		ionic_lif_deferred_enqueue(&lif->deferred, work);
1009 	} else {
1010 		ionic_lif_rx_mode(lif, rx_mode);
1011 	}
1012 }
1013 
1014 static void ionic_set_rx_mode(struct net_device *netdev)
1015 {
1016 	struct ionic_lif *lif = netdev_priv(netdev);
1017 	struct ionic_identity *ident;
1018 	unsigned int nfilters;
1019 	unsigned int rx_mode;
1020 
1021 	ident = &lif->ionic->ident;
1022 
1023 	rx_mode = IONIC_RX_MODE_F_UNICAST;
1024 	rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
1025 	rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
1026 	rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
1027 	rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
1028 
1029 	/* sync unicast addresses
1030 	 * next check to see if we're in an overflow state
1031 	 *    if so, we track that we overflowed and enable NIC PROMISC
1032 	 *    else if the overflow is set and not needed
1033 	 *       we remove our overflow flag and check the netdev flags
1034 	 *       to see if we can disable NIC PROMISC
1035 	 */
1036 	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
1037 	nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
1038 	if (netdev_uc_count(netdev) + 1 > nfilters) {
1039 		rx_mode |= IONIC_RX_MODE_F_PROMISC;
1040 		lif->uc_overflow = true;
1041 	} else if (lif->uc_overflow) {
1042 		lif->uc_overflow = false;
1043 		if (!(netdev->flags & IFF_PROMISC))
1044 			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
1045 	}
1046 
1047 	/* same for multicast */
1048 	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
1049 	nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
1050 	if (netdev_mc_count(netdev) > nfilters) {
1051 		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
1052 		lif->mc_overflow = true;
1053 	} else if (lif->mc_overflow) {
1054 		lif->mc_overflow = false;
1055 		if (!(netdev->flags & IFF_ALLMULTI))
1056 			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
1057 	}
1058 
1059 	if (lif->rx_mode != rx_mode)
1060 		_ionic_lif_rx_mode(lif, rx_mode);
1061 }
1062 
1063 static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1064 {
1065 	u64 wanted = 0;
1066 
1067 	if (features & NETIF_F_HW_VLAN_CTAG_TX)
1068 		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1069 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1070 		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1071 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1072 		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1073 	if (features & NETIF_F_RXHASH)
1074 		wanted |= IONIC_ETH_HW_RX_HASH;
1075 	if (features & NETIF_F_RXCSUM)
1076 		wanted |= IONIC_ETH_HW_RX_CSUM;
1077 	if (features & NETIF_F_SG)
1078 		wanted |= IONIC_ETH_HW_TX_SG;
1079 	if (features & NETIF_F_HW_CSUM)
1080 		wanted |= IONIC_ETH_HW_TX_CSUM;
1081 	if (features & NETIF_F_TSO)
1082 		wanted |= IONIC_ETH_HW_TSO;
1083 	if (features & NETIF_F_TSO6)
1084 		wanted |= IONIC_ETH_HW_TSO_IPV6;
1085 	if (features & NETIF_F_TSO_ECN)
1086 		wanted |= IONIC_ETH_HW_TSO_ECN;
1087 	if (features & NETIF_F_GSO_GRE)
1088 		wanted |= IONIC_ETH_HW_TSO_GRE;
1089 	if (features & NETIF_F_GSO_GRE_CSUM)
1090 		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1091 	if (features & NETIF_F_GSO_IPXIP4)
1092 		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1093 	if (features & NETIF_F_GSO_IPXIP6)
1094 		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1095 	if (features & NETIF_F_GSO_UDP_TUNNEL)
1096 		wanted |= IONIC_ETH_HW_TSO_UDP;
1097 	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1098 		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1099 
1100 	return cpu_to_le64(wanted);
1101 }
1102 
1103 static int ionic_set_nic_features(struct ionic_lif *lif,
1104 				  netdev_features_t features)
1105 {
1106 	struct device *dev = lif->ionic->dev;
1107 	struct ionic_admin_ctx ctx = {
1108 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1109 		.cmd.lif_setattr = {
1110 			.opcode = IONIC_CMD_LIF_SETATTR,
1111 			.index = cpu_to_le16(lif->index),
1112 			.attr = IONIC_LIF_ATTR_FEATURES,
1113 		},
1114 	};
1115 	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
1116 			 IONIC_ETH_HW_VLAN_RX_STRIP |
1117 			 IONIC_ETH_HW_VLAN_RX_FILTER;
1118 	u64 old_hw_features;
1119 	int err;
1120 
1121 	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
1122 	err = ionic_adminq_post_wait(lif, &ctx);
1123 	if (err)
1124 		return err;
1125 
1126 	old_hw_features = lif->hw_features;
1127 	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
1128 				       ctx.comp.lif_setattr.features);
1129 
1130 	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
1131 		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1132 
1133 	if ((vlan_flags & features) &&
1134 	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
1135 		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
1136 
1137 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1138 		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
1139 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1140 		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
1141 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1142 		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
1143 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1144 		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
1145 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1146 		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
1147 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1148 		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
1149 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1150 		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
1151 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1152 		dev_dbg(dev, "feature ETH_HW_TSO\n");
1153 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1154 		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
1155 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1156 		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
1157 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1158 		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
1159 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1160 		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
1161 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1162 		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
1163 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1164 		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
1165 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1166 		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
1167 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1168 		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
1169 
1170 	return 0;
1171 }
1172 
1173 static int ionic_init_nic_features(struct ionic_lif *lif)
1174 {
1175 	struct net_device *netdev = lif->netdev;
1176 	netdev_features_t features;
1177 	int err;
1178 
1179 	/* no netdev features on the management device */
1180 	if (lif->ionic->is_mgmt_nic)
1181 		return 0;
1182 
1183 	/* set up what we expect to support by default */
1184 	features = NETIF_F_HW_VLAN_CTAG_TX |
1185 		   NETIF_F_HW_VLAN_CTAG_RX |
1186 		   NETIF_F_HW_VLAN_CTAG_FILTER |
1187 		   NETIF_F_RXHASH |
1188 		   NETIF_F_SG |
1189 		   NETIF_F_HW_CSUM |
1190 		   NETIF_F_RXCSUM |
1191 		   NETIF_F_TSO |
1192 		   NETIF_F_TSO6 |
1193 		   NETIF_F_TSO_ECN;
1194 
1195 	err = ionic_set_nic_features(lif, features);
1196 	if (err)
1197 		return err;
1198 
1199 	/* tell the netdev what we actually can support */
1200 	netdev->features |= NETIF_F_HIGHDMA;
1201 
1202 	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1203 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
1204 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1205 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1206 	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1207 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1208 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1209 		netdev->hw_features |= NETIF_F_RXHASH;
1210 	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1211 		netdev->hw_features |= NETIF_F_SG;
1212 
1213 	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1214 		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
1215 	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1216 		netdev->hw_enc_features |= NETIF_F_RXCSUM;
1217 	if (lif->hw_features & IONIC_ETH_HW_TSO)
1218 		netdev->hw_enc_features |= NETIF_F_TSO;
1219 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1220 		netdev->hw_enc_features |= NETIF_F_TSO6;
1221 	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1222 		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
1223 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1224 		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
1225 	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1226 		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
1227 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1228 		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
1229 	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1230 		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
1231 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1232 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1233 	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1234 		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1235 
1236 	netdev->hw_features |= netdev->hw_enc_features;
1237 	netdev->features |= netdev->hw_features;
1238 
1239 	netdev->priv_flags |= IFF_UNICAST_FLT |
1240 			      IFF_LIVE_ADDR_CHANGE;
1241 
1242 	return 0;
1243 }
1244 
1245 static int ionic_set_features(struct net_device *netdev,
1246 			      netdev_features_t features)
1247 {
1248 	struct ionic_lif *lif = netdev_priv(netdev);
1249 	int err;
1250 
1251 	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1252 		   __func__, (u64)lif->netdev->features, (u64)features);
1253 
1254 	err = ionic_set_nic_features(lif, features);
1255 
1256 	return err;
1257 }
1258 
1259 static int ionic_set_mac_address(struct net_device *netdev, void *sa)
1260 {
1261 	struct sockaddr *addr = sa;
1262 	u8 *mac;
1263 	int err;
1264 
1265 	mac = (u8 *)addr->sa_data;
1266 	if (ether_addr_equal(netdev->dev_addr, mac))
1267 		return 0;
1268 
1269 	err = eth_prepare_mac_addr_change(netdev, addr);
1270 	if (err)
1271 		return err;
1272 
1273 	if (!is_zero_ether_addr(netdev->dev_addr)) {
1274 		netdev_info(netdev, "deleting mac addr %pM\n",
1275 			    netdev->dev_addr);
1276 		ionic_addr_del(netdev, netdev->dev_addr);
1277 	}
1278 
1279 	eth_commit_mac_addr_change(netdev, addr);
1280 	netdev_info(netdev, "updating mac addr %pM\n", mac);
1281 
1282 	return ionic_addr_add(netdev, mac);
1283 }
1284 
1285 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1286 {
1287 	struct ionic_lif *lif = netdev_priv(netdev);
1288 	struct ionic_admin_ctx ctx = {
1289 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1290 		.cmd.lif_setattr = {
1291 			.opcode = IONIC_CMD_LIF_SETATTR,
1292 			.index = cpu_to_le16(lif->index),
1293 			.attr = IONIC_LIF_ATTR_MTU,
1294 			.mtu = cpu_to_le32(new_mtu),
1295 		},
1296 	};
1297 	int err;
1298 
1299 	err = ionic_adminq_post_wait(lif, &ctx);
1300 	if (err)
1301 		return err;
1302 
1303 	netdev->mtu = new_mtu;
1304 	err = ionic_reset_queues(lif);
1305 
1306 	return err;
1307 }
1308 
1309 static void ionic_tx_timeout_work(struct work_struct *ws)
1310 {
1311 	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1312 
1313 	netdev_info(lif->netdev, "Tx Timeout recovery\n");
1314 
1315 	rtnl_lock();
1316 	ionic_reset_queues(lif);
1317 	rtnl_unlock();
1318 }
1319 
1320 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1321 {
1322 	struct ionic_lif *lif = netdev_priv(netdev);
1323 
1324 	schedule_work(&lif->tx_timeout_work);
1325 }
1326 
1327 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1328 				 u16 vid)
1329 {
1330 	struct ionic_lif *lif = netdev_priv(netdev);
1331 	struct ionic_admin_ctx ctx = {
1332 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1333 		.cmd.rx_filter_add = {
1334 			.opcode = IONIC_CMD_RX_FILTER_ADD,
1335 			.lif_index = cpu_to_le16(lif->index),
1336 			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
1337 			.vlan.vlan = cpu_to_le16(vid),
1338 		},
1339 	};
1340 	int err;
1341 
1342 	err = ionic_adminq_post_wait(lif, &ctx);
1343 	if (err)
1344 		return err;
1345 
1346 	netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
1347 		   ctx.comp.rx_filter_add.filter_id);
1348 
1349 	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
1350 }
1351 
1352 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1353 				  u16 vid)
1354 {
1355 	struct ionic_lif *lif = netdev_priv(netdev);
1356 	struct ionic_admin_ctx ctx = {
1357 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1358 		.cmd.rx_filter_del = {
1359 			.opcode = IONIC_CMD_RX_FILTER_DEL,
1360 			.lif_index = cpu_to_le16(lif->index),
1361 		},
1362 	};
1363 	struct ionic_rx_filter *f;
1364 
1365 	spin_lock_bh(&lif->rx_filters.lock);
1366 
1367 	f = ionic_rx_filter_by_vlan(lif, vid);
1368 	if (!f) {
1369 		spin_unlock_bh(&lif->rx_filters.lock);
1370 		return -ENOENT;
1371 	}
1372 
1373 	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
1374 		   le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));
1375 
1376 	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
1377 	ionic_rx_filter_free(lif, f);
1378 	spin_unlock_bh(&lif->rx_filters.lock);
1379 
1380 	return ionic_adminq_post_wait(lif, &ctx);
1381 }
1382 
1383 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1384 			 const u8 *key, const u32 *indir)
1385 {
1386 	struct ionic_admin_ctx ctx = {
1387 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1388 		.cmd.lif_setattr = {
1389 			.opcode = IONIC_CMD_LIF_SETATTR,
1390 			.attr = IONIC_LIF_ATTR_RSS,
1391 			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1392 		},
1393 	};
1394 	unsigned int i, tbl_sz;
1395 
1396 	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
1397 		lif->rss_types = types;
1398 		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
1399 	}
1400 
1401 	if (key)
1402 		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1403 
1404 	if (indir) {
1405 		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1406 		for (i = 0; i < tbl_sz; i++)
1407 			lif->rss_ind_tbl[i] = indir[i];
1408 	}
1409 
1410 	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1411 	       IONIC_RSS_HASH_KEY_SIZE);
1412 
1413 	return ionic_adminq_post_wait(lif, &ctx);
1414 }
1415 
1416 static int ionic_lif_rss_init(struct ionic_lif *lif)
1417 {
1418 	unsigned int tbl_sz;
1419 	unsigned int i;
1420 
1421 	lif->rss_types = IONIC_RSS_TYPE_IPV4     |
1422 			 IONIC_RSS_TYPE_IPV4_TCP |
1423 			 IONIC_RSS_TYPE_IPV4_UDP |
1424 			 IONIC_RSS_TYPE_IPV6     |
1425 			 IONIC_RSS_TYPE_IPV6_TCP |
1426 			 IONIC_RSS_TYPE_IPV6_UDP;
1427 
1428 	/* Fill indirection table with 'default' values */
1429 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1430 	for (i = 0; i < tbl_sz; i++)
1431 		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1432 
1433 	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1434 }
1435 
1436 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1437 {
1438 	int tbl_sz;
1439 
1440 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1441 	memset(lif->rss_ind_tbl, 0, tbl_sz);
1442 	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1443 
1444 	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1445 }
1446 
1447 static void ionic_txrx_disable(struct ionic_lif *lif)
1448 {
1449 	unsigned int i;
1450 	int err;
1451 
1452 	if (lif->txqcqs) {
1453 		for (i = 0; i < lif->nxqs; i++) {
1454 			err = ionic_qcq_disable(lif->txqcqs[i].qcq);
1455 			if (err == -ETIMEDOUT)
1456 				break;
1457 		}
1458 	}
1459 
1460 	if (lif->rxqcqs) {
1461 		for (i = 0; i < lif->nxqs; i++) {
1462 			err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
1463 			if (err == -ETIMEDOUT)
1464 				break;
1465 		}
1466 	}
1467 }
1468 
1469 static void ionic_txrx_deinit(struct ionic_lif *lif)
1470 {
1471 	unsigned int i;
1472 
1473 	if (lif->txqcqs) {
1474 		for (i = 0; i < lif->nxqs; i++) {
1475 			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1476 			ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
1477 			ionic_tx_empty(&lif->txqcqs[i].qcq->q);
1478 		}
1479 	}
1480 
1481 	if (lif->rxqcqs) {
1482 		for (i = 0; i < lif->nxqs; i++) {
1483 			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1484 			ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
1485 			ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
1486 		}
1487 	}
1488 	lif->rx_mode = 0;
1489 }
1490 
1491 static void ionic_txrx_free(struct ionic_lif *lif)
1492 {
1493 	unsigned int i;
1494 
1495 	if (lif->txqcqs) {
1496 		for (i = 0; i < lif->nxqs; i++) {
1497 			ionic_qcq_free(lif, lif->txqcqs[i].qcq);
1498 			lif->txqcqs[i].qcq = NULL;
1499 		}
1500 	}
1501 
1502 	if (lif->rxqcqs) {
1503 		for (i = 0; i < lif->nxqs; i++) {
1504 			ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
1505 			lif->rxqcqs[i].qcq = NULL;
1506 		}
1507 	}
1508 }
1509 
1510 static int ionic_txrx_alloc(struct ionic_lif *lif)
1511 {
1512 	unsigned int flags;
1513 	unsigned int i;
1514 	int err = 0;
1515 
1516 	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
1517 	for (i = 0; i < lif->nxqs; i++) {
1518 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
1519 				      lif->ntxq_descs,
1520 				      sizeof(struct ionic_txq_desc),
1521 				      sizeof(struct ionic_txq_comp),
1522 				      sizeof(struct ionic_txq_sg_desc),
1523 				      lif->kern_pid, &lif->txqcqs[i].qcq);
1524 		if (err)
1525 			goto err_out;
1526 
1527 		lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
1528 		ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq);
1529 	}
1530 
1531 	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
1532 	for (i = 0; i < lif->nxqs; i++) {
1533 		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
1534 				      lif->nrxq_descs,
1535 				      sizeof(struct ionic_rxq_desc),
1536 				      sizeof(struct ionic_rxq_comp),
1537 				      sizeof(struct ionic_rxq_sg_desc),
1538 				      lif->kern_pid, &lif->rxqcqs[i].qcq);
1539 		if (err)
1540 			goto err_out;
1541 
1542 		lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
1543 
1544 		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
1545 				     lif->rxqcqs[i].qcq->intr.index,
1546 				     lif->rx_coalesce_hw);
1547 		ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
1548 					  lif->txqcqs[i].qcq);
1549 		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq);
1550 	}
1551 
1552 	return 0;
1553 
1554 err_out:
1555 	ionic_txrx_free(lif);
1556 
1557 	return err;
1558 }
1559 
1560 static int ionic_txrx_init(struct ionic_lif *lif)
1561 {
1562 	unsigned int i;
1563 	int err;
1564 
1565 	for (i = 0; i < lif->nxqs; i++) {
1566 		err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
1567 		if (err)
1568 			goto err_out;
1569 
1570 		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
1571 		if (err) {
1572 			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1573 			goto err_out;
1574 		}
1575 	}
1576 
1577 	if (lif->netdev->features & NETIF_F_RXHASH)
1578 		ionic_lif_rss_init(lif);
1579 
1580 	ionic_set_rx_mode(lif->netdev);
1581 
1582 	return 0;
1583 
1584 err_out:
1585 	while (i--) {
1586 		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1587 		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1588 	}
1589 
1590 	return err;
1591 }
1592 
1593 static int ionic_txrx_enable(struct ionic_lif *lif)
1594 {
1595 	int i, err;
1596 
1597 	for (i = 0; i < lif->nxqs; i++) {
1598 		ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
1599 		err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
1600 		if (err)
1601 			goto err_out;
1602 
1603 		err = ionic_qcq_enable(lif->txqcqs[i].qcq);
1604 		if (err) {
1605 			if (err != -ETIMEDOUT)
1606 				ionic_qcq_disable(lif->rxqcqs[i].qcq);
1607 			goto err_out;
1608 		}
1609 	}
1610 
1611 	return 0;
1612 
1613 err_out:
1614 	while (i--) {
1615 		err = ionic_qcq_disable(lif->txqcqs[i].qcq);
1616 		if (err == -ETIMEDOUT)
1617 			break;
1618 		err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
1619 		if (err == -ETIMEDOUT)
1620 			break;
1621 	}
1622 
1623 	return err;
1624 }
1625 
1626 static int ionic_start_queues(struct ionic_lif *lif)
1627 {
1628 	int err;
1629 
1630 	if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
1631 		return 0;
1632 
1633 	err = ionic_txrx_enable(lif);
1634 	if (err) {
1635 		clear_bit(IONIC_LIF_F_UP, lif->state);
1636 		return err;
1637 	}
1638 	netif_tx_wake_all_queues(lif->netdev);
1639 
1640 	return 0;
1641 }
1642 
1643 int ionic_open(struct net_device *netdev)
1644 {
1645 	struct ionic_lif *lif = netdev_priv(netdev);
1646 	int err;
1647 
1648 	err = ionic_txrx_alloc(lif);
1649 	if (err)
1650 		return err;
1651 
1652 	err = ionic_txrx_init(lif);
1653 	if (err)
1654 		goto err_out;
1655 
1656 	/* don't start the queues until we have link */
1657 	if (netif_carrier_ok(netdev)) {
1658 		err = ionic_start_queues(lif);
1659 		if (err)
1660 			goto err_txrx_deinit;
1661 	}
1662 
1663 	return 0;
1664 
1665 err_txrx_deinit:
1666 	ionic_txrx_deinit(lif);
1667 err_out:
1668 	ionic_txrx_free(lif);
1669 	return err;
1670 }
1671 
1672 static void ionic_stop_queues(struct ionic_lif *lif)
1673 {
1674 	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
1675 		return;
1676 
1677 	ionic_txrx_disable(lif);
1678 	netif_tx_disable(lif->netdev);
1679 }
1680 
1681 int ionic_stop(struct net_device *netdev)
1682 {
1683 	struct ionic_lif *lif = netdev_priv(netdev);
1684 
1685 	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
1686 		return 0;
1687 
1688 	ionic_stop_queues(lif);
1689 	ionic_txrx_deinit(lif);
1690 	ionic_txrx_free(lif);
1691 
1692 	return 0;
1693 }
1694 
1695 static int ionic_get_vf_config(struct net_device *netdev,
1696 			       int vf, struct ifla_vf_info *ivf)
1697 {
1698 	struct ionic_lif *lif = netdev_priv(netdev);
1699 	struct ionic *ionic = lif->ionic;
1700 	int ret = 0;
1701 
1702 	down_read(&ionic->vf_op_lock);
1703 
1704 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1705 		ret = -EINVAL;
1706 	} else {
1707 		ivf->vf           = vf;
1708 		ivf->vlan         = ionic->vfs[vf].vlanid;
1709 		ivf->qos	  = 0;
1710 		ivf->spoofchk     = ionic->vfs[vf].spoofchk;
1711 		ivf->linkstate    = ionic->vfs[vf].linkstate;
1712 		ivf->max_tx_rate  = ionic->vfs[vf].maxrate;
1713 		ivf->trusted      = ionic->vfs[vf].trusted;
1714 		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
1715 	}
1716 
1717 	up_read(&ionic->vf_op_lock);
1718 	return ret;
1719 }
1720 
1721 static int ionic_get_vf_stats(struct net_device *netdev, int vf,
1722 			      struct ifla_vf_stats *vf_stats)
1723 {
1724 	struct ionic_lif *lif = netdev_priv(netdev);
1725 	struct ionic *ionic = lif->ionic;
1726 	struct ionic_lif_stats *vs;
1727 	int ret = 0;
1728 
1729 	down_read(&ionic->vf_op_lock);
1730 
1731 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1732 		ret = -EINVAL;
1733 	} else {
1734 		memset(vf_stats, 0, sizeof(*vf_stats));
1735 		vs = &ionic->vfs[vf].stats;
1736 
1737 		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
1738 		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
1739 		vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
1740 		vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
1741 		vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
1742 		vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
1743 		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
1744 				       le64_to_cpu(vs->rx_mcast_drop_packets) +
1745 				       le64_to_cpu(vs->rx_bcast_drop_packets);
1746 		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
1747 				       le64_to_cpu(vs->tx_mcast_drop_packets) +
1748 				       le64_to_cpu(vs->tx_bcast_drop_packets);
1749 	}
1750 
1751 	up_read(&ionic->vf_op_lock);
1752 	return ret;
1753 }
1754 
1755 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1756 {
1757 	struct ionic_lif *lif = netdev_priv(netdev);
1758 	struct ionic *ionic = lif->ionic;
1759 	int ret;
1760 
1761 	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
1762 		return -EINVAL;
1763 
1764 	down_write(&ionic->vf_op_lock);
1765 
1766 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1767 		ret = -EINVAL;
1768 	} else {
1769 		ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
1770 		if (!ret)
1771 			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
1772 	}
1773 
1774 	up_write(&ionic->vf_op_lock);
1775 	return ret;
1776 }
1777 
1778 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1779 			     u8 qos, __be16 proto)
1780 {
1781 	struct ionic_lif *lif = netdev_priv(netdev);
1782 	struct ionic *ionic = lif->ionic;
1783 	int ret;
1784 
1785 	/* until someday when we support qos */
1786 	if (qos)
1787 		return -EINVAL;
1788 
1789 	if (vlan > 4095)
1790 		return -EINVAL;
1791 
1792 	if (proto != htons(ETH_P_8021Q))
1793 		return -EPROTONOSUPPORT;
1794 
1795 	down_write(&ionic->vf_op_lock);
1796 
1797 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1798 		ret = -EINVAL;
1799 	} else {
1800 		ret = ionic_set_vf_config(ionic, vf,
1801 					  IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
1802 		if (!ret)
1803 			ionic->vfs[vf].vlanid = vlan;
1804 	}
1805 
1806 	up_write(&ionic->vf_op_lock);
1807 	return ret;
1808 }
1809 
1810 static int ionic_set_vf_rate(struct net_device *netdev, int vf,
1811 			     int tx_min, int tx_max)
1812 {
1813 	struct ionic_lif *lif = netdev_priv(netdev);
1814 	struct ionic *ionic = lif->ionic;
1815 	int ret;
1816 
1817 	/* setting the min just seems silly */
1818 	if (tx_min)
1819 		return -EINVAL;
1820 
1821 	down_write(&ionic->vf_op_lock);
1822 
1823 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1824 		ret = -EINVAL;
1825 	} else {
1826 		ret = ionic_set_vf_config(ionic, vf,
1827 					  IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
1828 		if (!ret)
1829 			lif->ionic->vfs[vf].maxrate = tx_max;
1830 	}
1831 
1832 	up_write(&ionic->vf_op_lock);
1833 	return ret;
1834 }
1835 
1836 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
1837 {
1838 	struct ionic_lif *lif = netdev_priv(netdev);
1839 	struct ionic *ionic = lif->ionic;
1840 	u8 data = set;  /* convert to u8 for config */
1841 	int ret;
1842 
1843 	down_write(&ionic->vf_op_lock);
1844 
1845 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1846 		ret = -EINVAL;
1847 	} else {
1848 		ret = ionic_set_vf_config(ionic, vf,
1849 					  IONIC_VF_ATTR_SPOOFCHK, &data);
1850 		if (!ret)
1851 			ionic->vfs[vf].spoofchk = data;
1852 	}
1853 
1854 	up_write(&ionic->vf_op_lock);
1855 	return ret;
1856 }
1857 
1858 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
1859 {
1860 	struct ionic_lif *lif = netdev_priv(netdev);
1861 	struct ionic *ionic = lif->ionic;
1862 	u8 data = set;  /* convert to u8 for config */
1863 	int ret;
1864 
1865 	down_write(&ionic->vf_op_lock);
1866 
1867 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1868 		ret = -EINVAL;
1869 	} else {
1870 		ret = ionic_set_vf_config(ionic, vf,
1871 					  IONIC_VF_ATTR_TRUST, &data);
1872 		if (!ret)
1873 			ionic->vfs[vf].trusted = data;
1874 	}
1875 
1876 	up_write(&ionic->vf_op_lock);
1877 	return ret;
1878 }
1879 
1880 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
1881 {
1882 	struct ionic_lif *lif = netdev_priv(netdev);
1883 	struct ionic *ionic = lif->ionic;
1884 	u8 data;
1885 	int ret;
1886 
1887 	switch (set) {
1888 	case IFLA_VF_LINK_STATE_ENABLE:
1889 		data = IONIC_VF_LINK_STATUS_UP;
1890 		break;
1891 	case IFLA_VF_LINK_STATE_DISABLE:
1892 		data = IONIC_VF_LINK_STATUS_DOWN;
1893 		break;
1894 	case IFLA_VF_LINK_STATE_AUTO:
1895 		data = IONIC_VF_LINK_STATUS_AUTO;
1896 		break;
1897 	default:
1898 		return -EINVAL;
1899 	}
1900 
1901 	down_write(&ionic->vf_op_lock);
1902 
1903 	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1904 		ret = -EINVAL;
1905 	} else {
1906 		ret = ionic_set_vf_config(ionic, vf,
1907 					  IONIC_VF_ATTR_LINKSTATE, &data);
1908 		if (!ret)
1909 			ionic->vfs[vf].linkstate = set;
1910 	}
1911 
1912 	up_write(&ionic->vf_op_lock);
1913 	return ret;
1914 }
1915 
1916 static const struct net_device_ops ionic_netdev_ops = {
1917 	.ndo_open               = ionic_open,
1918 	.ndo_stop               = ionic_stop,
1919 	.ndo_start_xmit		= ionic_start_xmit,
1920 	.ndo_get_stats64	= ionic_get_stats64,
1921 	.ndo_set_rx_mode	= ionic_set_rx_mode,
1922 	.ndo_set_features	= ionic_set_features,
1923 	.ndo_set_mac_address	= ionic_set_mac_address,
1924 	.ndo_validate_addr	= eth_validate_addr,
1925 	.ndo_tx_timeout         = ionic_tx_timeout,
1926 	.ndo_change_mtu         = ionic_change_mtu,
1927 	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
1928 	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
1929 	.ndo_set_vf_vlan	= ionic_set_vf_vlan,
1930 	.ndo_set_vf_trust	= ionic_set_vf_trust,
1931 	.ndo_set_vf_mac		= ionic_set_vf_mac,
1932 	.ndo_set_vf_rate	= ionic_set_vf_rate,
1933 	.ndo_set_vf_spoofchk	= ionic_set_vf_spoofchk,
1934 	.ndo_get_vf_config	= ionic_get_vf_config,
1935 	.ndo_set_vf_link_state	= ionic_set_vf_link_state,
1936 	.ndo_get_vf_stats       = ionic_get_vf_stats,
1937 };
1938 
1939 int ionic_reset_queues(struct ionic_lif *lif)
1940 {
1941 	bool running;
1942 	int err = 0;
1943 
1944 	/* Put off the next watchdog timeout */
1945 	netif_trans_update(lif->netdev);
1946 
1947 	err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
1948 	if (err)
1949 		return err;
1950 
1951 	running = netif_running(lif->netdev);
1952 	if (running)
1953 		err = ionic_stop(lif->netdev);
1954 	if (!err && running)
1955 		ionic_open(lif->netdev);
1956 
1957 	clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
1958 
1959 	return err;
1960 }
1961 
1962 static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
1963 {
1964 	struct device *dev = ionic->dev;
1965 	struct net_device *netdev;
1966 	struct ionic_lif *lif;
1967 	int tbl_sz;
1968 	int err;
1969 
1970 	netdev = alloc_etherdev_mqs(sizeof(*lif),
1971 				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
1972 	if (!netdev) {
1973 		dev_err(dev, "Cannot allocate netdev, aborting\n");
1974 		return ERR_PTR(-ENOMEM);
1975 	}
1976 
1977 	SET_NETDEV_DEV(netdev, dev);
1978 
1979 	lif = netdev_priv(netdev);
1980 	lif->netdev = netdev;
1981 	ionic->master_lif = lif;
1982 	netdev->netdev_ops = &ionic_netdev_ops;
1983 	ionic_ethtool_set_ops(netdev);
1984 
1985 	netdev->watchdog_timeo = 2 * HZ;
1986 	netif_carrier_off(netdev);
1987 
1988 	netdev->min_mtu = IONIC_MIN_MTU;
1989 	netdev->max_mtu = IONIC_MAX_MTU;
1990 
1991 	lif->neqs = ionic->neqs_per_lif;
1992 	lif->nxqs = ionic->ntxqs_per_lif;
1993 
1994 	lif->ionic = ionic;
1995 	lif->index = index;
1996 	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
1997 	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
1998 
1999 	/* Convert the default coalesce value to actual hw resolution */
2000 	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
2001 	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
2002 						    lif->rx_coalesce_usecs);
2003 
2004 	snprintf(lif->name, sizeof(lif->name), "lif%u", index);
2005 
2006 	spin_lock_init(&lif->adminq_lock);
2007 
2008 	spin_lock_init(&lif->deferred.lock);
2009 	INIT_LIST_HEAD(&lif->deferred.list);
2010 	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
2011 
2012 	/* allocate lif info */
2013 	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
2014 	lif->info = dma_alloc_coherent(dev, lif->info_sz,
2015 				       &lif->info_pa, GFP_KERNEL);
2016 	if (!lif->info) {
2017 		dev_err(dev, "Failed to allocate lif info, aborting\n");
2018 		err = -ENOMEM;
2019 		goto err_out_free_netdev;
2020 	}
2021 
2022 	ionic_debugfs_add_lif(lif);
2023 
2024 	/* allocate queues */
2025 	err = ionic_qcqs_alloc(lif);
2026 	if (err)
2027 		goto err_out_free_lif_info;
2028 
2029 	/* allocate rss indirection table */
2030 	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
2031 	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
2032 	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
2033 					      &lif->rss_ind_tbl_pa,
2034 					      GFP_KERNEL);
2035 
2036 	if (!lif->rss_ind_tbl) {
2037 		err = -ENOMEM;
2038 		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
2039 		goto err_out_free_qcqs;
2040 	}
2041 	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
2042 
2043 	list_add_tail(&lif->list, &ionic->lifs);
2044 
2045 	return lif;
2046 
2047 err_out_free_qcqs:
2048 	ionic_qcqs_free(lif);
2049 err_out_free_lif_info:
2050 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2051 	lif->info = NULL;
2052 	lif->info_pa = 0;
2053 err_out_free_netdev:
2054 	free_netdev(lif->netdev);
2055 	lif = NULL;
2056 
2057 	return ERR_PTR(err);
2058 }
2059 
2060 int ionic_lifs_alloc(struct ionic *ionic)
2061 {
2062 	struct ionic_lif *lif;
2063 
2064 	INIT_LIST_HEAD(&ionic->lifs);
2065 
2066 	/* only build the first lif, others are for later features */
2067 	set_bit(0, ionic->lifbits);
2068 	lif = ionic_lif_alloc(ionic, 0);
2069 
2070 	return PTR_ERR_OR_ZERO(lif);
2071 }
2072 
2073 static void ionic_lif_reset(struct ionic_lif *lif)
2074 {
2075 	struct ionic_dev *idev = &lif->ionic->idev;
2076 
2077 	mutex_lock(&lif->ionic->dev_cmd_lock);
2078 	ionic_dev_cmd_lif_reset(idev, lif->index);
2079 	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2080 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2081 }
2082 
2083 static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
2084 {
2085 	struct ionic *ionic = lif->ionic;
2086 
2087 	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
2088 		return;
2089 
2090 	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
2091 
2092 	netif_device_detach(lif->netdev);
2093 
2094 	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
2095 		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
2096 		ionic_stop_queues(lif);
2097 	}
2098 
2099 	if (netif_running(lif->netdev)) {
2100 		ionic_txrx_deinit(lif);
2101 		ionic_txrx_free(lif);
2102 	}
2103 	ionic_lifs_deinit(ionic);
2104 	ionic_qcqs_free(lif);
2105 
2106 	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
2107 }
2108 
2109 static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
2110 {
2111 	struct ionic *ionic = lif->ionic;
2112 	int err;
2113 
2114 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2115 		return;
2116 
2117 	dev_info(ionic->dev, "FW Up: restarting LIFs\n");
2118 
2119 	err = ionic_qcqs_alloc(lif);
2120 	if (err)
2121 		goto err_out;
2122 
2123 	err = ionic_lifs_init(ionic);
2124 	if (err)
2125 		goto err_qcqs_free;
2126 
2127 	if (lif->registered)
2128 		ionic_lif_set_netdev_info(lif);
2129 
2130 	ionic_rx_filter_replay(lif);
2131 
2132 	if (netif_running(lif->netdev)) {
2133 		err = ionic_txrx_alloc(lif);
2134 		if (err)
2135 			goto err_lifs_deinit;
2136 
2137 		err = ionic_txrx_init(lif);
2138 		if (err)
2139 			goto err_txrx_free;
2140 	}
2141 
2142 	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
2143 	ionic_link_status_check_request(lif);
2144 	netif_device_attach(lif->netdev);
2145 	dev_info(ionic->dev, "FW Up: LIFs restarted\n");
2146 
2147 	return;
2148 
2149 err_txrx_free:
2150 	ionic_txrx_free(lif);
2151 err_lifs_deinit:
2152 	ionic_lifs_deinit(ionic);
2153 err_qcqs_free:
2154 	ionic_qcqs_free(lif);
2155 err_out:
2156 	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
2157 }
2158 
2159 static void ionic_lif_free(struct ionic_lif *lif)
2160 {
2161 	struct device *dev = lif->ionic->dev;
2162 
2163 	/* free rss indirection table */
2164 	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
2165 			  lif->rss_ind_tbl_pa);
2166 	lif->rss_ind_tbl = NULL;
2167 	lif->rss_ind_tbl_pa = 0;
2168 
2169 	/* free queues */
2170 	ionic_qcqs_free(lif);
2171 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2172 		ionic_lif_reset(lif);
2173 
2174 	/* free lif info */
2175 	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2176 	lif->info = NULL;
2177 	lif->info_pa = 0;
2178 
2179 	/* unmap doorbell page */
2180 	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2181 	lif->kern_dbpage = NULL;
2182 	kfree(lif->dbid_inuse);
2183 	lif->dbid_inuse = NULL;
2184 
2185 	/* free netdev & lif */
2186 	ionic_debugfs_del_lif(lif);
2187 	list_del(&lif->list);
2188 	free_netdev(lif->netdev);
2189 }
2190 
2191 void ionic_lifs_free(struct ionic *ionic)
2192 {
2193 	struct list_head *cur, *tmp;
2194 	struct ionic_lif *lif;
2195 
2196 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2197 		lif = list_entry(cur, struct ionic_lif, list);
2198 
2199 		ionic_lif_free(lif);
2200 	}
2201 }
2202 
2203 static void ionic_lif_deinit(struct ionic_lif *lif)
2204 {
2205 	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
2206 		return;
2207 
2208 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2209 		cancel_work_sync(&lif->deferred.work);
2210 		cancel_work_sync(&lif->tx_timeout_work);
2211 		ionic_rx_filters_deinit(lif);
2212 	}
2213 
2214 	if (lif->netdev->features & NETIF_F_RXHASH)
2215 		ionic_lif_rss_deinit(lif);
2216 
2217 	napi_disable(&lif->adminqcq->napi);
2218 	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2219 	ionic_lif_qcq_deinit(lif, lif->adminqcq);
2220 
2221 	ionic_lif_reset(lif);
2222 }
2223 
2224 void ionic_lifs_deinit(struct ionic *ionic)
2225 {
2226 	struct list_head *cur, *tmp;
2227 	struct ionic_lif *lif;
2228 
2229 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2230 		lif = list_entry(cur, struct ionic_lif, list);
2231 		ionic_lif_deinit(lif);
2232 	}
2233 }
2234 
2235 static int ionic_lif_adminq_init(struct ionic_lif *lif)
2236 {
2237 	struct device *dev = lif->ionic->dev;
2238 	struct ionic_q_init_comp comp;
2239 	struct ionic_dev *idev;
2240 	struct ionic_qcq *qcq;
2241 	struct ionic_queue *q;
2242 	int err;
2243 
2244 	idev = &lif->ionic->idev;
2245 	qcq = lif->adminqcq;
2246 	q = &qcq->q;
2247 
2248 	mutex_lock(&lif->ionic->dev_cmd_lock);
2249 	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
2250 	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2251 	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2252 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2253 	if (err) {
2254 		netdev_err(lif->netdev, "adminq init failed %d\n", err);
2255 		return err;
2256 	}
2257 
2258 	q->hw_type = comp.hw_type;
2259 	q->hw_index = le32_to_cpu(comp.hw_index);
2260 	q->dbval = IONIC_DBELL_QID(q->hw_index);
2261 
2262 	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
2263 	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
2264 
2265 	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
2266 		       NAPI_POLL_WEIGHT);
2267 
2268 	napi_enable(&qcq->napi);
2269 
2270 	if (qcq->flags & IONIC_QCQ_F_INTR)
2271 		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
2272 				IONIC_INTR_MASK_CLEAR);
2273 
2274 	qcq->flags |= IONIC_QCQ_F_INITED;
2275 
2276 	return 0;
2277 }
2278 
2279 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
2280 {
2281 	struct ionic_qcq *qcq = lif->notifyqcq;
2282 	struct device *dev = lif->ionic->dev;
2283 	struct ionic_queue *q = &qcq->q;
2284 	int err;
2285 
2286 	struct ionic_admin_ctx ctx = {
2287 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2288 		.cmd.q_init = {
2289 			.opcode = IONIC_CMD_Q_INIT,
2290 			.lif_index = cpu_to_le16(lif->index),
2291 			.type = q->type,
2292 			.index = cpu_to_le32(q->index),
2293 			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
2294 					     IONIC_QINIT_F_ENA),
2295 			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
2296 			.pid = cpu_to_le16(q->pid),
2297 			.ring_size = ilog2(q->num_descs),
2298 			.ring_base = cpu_to_le64(q->base_pa),
2299 		}
2300 	};
2301 
2302 	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
2303 	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
2304 	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
2305 	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
2306 
2307 	err = ionic_adminq_post_wait(lif, &ctx);
2308 	if (err)
2309 		return err;
2310 
2311 	lif->last_eid = 0;
2312 	q->hw_type = ctx.comp.q_init.hw_type;
2313 	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
2314 	q->dbval = IONIC_DBELL_QID(q->hw_index);
2315 
2316 	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
2317 	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
2318 
2319 	/* preset the callback info */
2320 	q->info[0].cb_arg = lif;
2321 
2322 	qcq->flags |= IONIC_QCQ_F_INITED;
2323 
2324 	return 0;
2325 }
2326 
2327 static int ionic_station_set(struct ionic_lif *lif)
2328 {
2329 	struct net_device *netdev = lif->netdev;
2330 	struct ionic_admin_ctx ctx = {
2331 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2332 		.cmd.lif_getattr = {
2333 			.opcode = IONIC_CMD_LIF_GETATTR,
2334 			.index = cpu_to_le16(lif->index),
2335 			.attr = IONIC_LIF_ATTR_MAC,
2336 		},
2337 	};
2338 	struct sockaddr addr;
2339 	int err;
2340 
2341 	err = ionic_adminq_post_wait(lif, &ctx);
2342 	if (err)
2343 		return err;
2344 	netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
2345 		   ctx.comp.lif_getattr.mac);
2346 	if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
2347 		return 0;
2348 
2349 	if (!ether_addr_equal(ctx.comp.lif_getattr.mac, netdev->dev_addr)) {
2350 		memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
2351 		addr.sa_family = AF_INET;
2352 		err = eth_prepare_mac_addr_change(netdev, &addr);
2353 		if (err) {
2354 			netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
2355 				    addr.sa_data, err);
2356 			return 0;
2357 		}
2358 
2359 		if (!is_zero_ether_addr(netdev->dev_addr)) {
2360 			netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
2361 				   netdev->dev_addr);
2362 			ionic_lif_addr(lif, netdev->dev_addr, false);
2363 		}
2364 
2365 		eth_commit_mac_addr_change(netdev, &addr);
2366 	}
2367 
2368 	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
2369 		   netdev->dev_addr);
2370 	ionic_lif_addr(lif, netdev->dev_addr, true);
2371 
2372 	return 0;
2373 }
2374 
2375 static int ionic_lif_init(struct ionic_lif *lif)
2376 {
2377 	struct ionic_dev *idev = &lif->ionic->idev;
2378 	struct device *dev = lif->ionic->dev;
2379 	struct ionic_lif_init_comp comp;
2380 	int dbpage_num;
2381 	int err;
2382 
2383 	mutex_lock(&lif->ionic->dev_cmd_lock);
2384 	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
2385 	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2386 	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2387 	mutex_unlock(&lif->ionic->dev_cmd_lock);
2388 	if (err)
2389 		return err;
2390 
2391 	lif->hw_index = le16_to_cpu(comp.hw_index);
2392 
2393 	/* now that we have the hw_index we can figure out our doorbell page */
2394 	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
2395 	if (!lif->dbid_count) {
2396 		dev_err(dev, "No doorbell pages, aborting\n");
2397 		return -EINVAL;
2398 	}
2399 
2400 	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
2401 	if (!lif->dbid_inuse) {
2402 		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
2403 		return -ENOMEM;
2404 	}
2405 
2406 	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
2407 	set_bit(0, lif->dbid_inuse);
2408 	lif->kern_pid = 0;
2409 
2410 	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
2411 	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
2412 	if (!lif->kern_dbpage) {
2413 		dev_err(dev, "Cannot map dbpage, aborting\n");
2414 		err = -ENOMEM;
2415 		goto err_out_free_dbid;
2416 	}
2417 
2418 	err = ionic_lif_adminq_init(lif);
2419 	if (err)
2420 		goto err_out_adminq_deinit;
2421 
2422 	if (lif->ionic->nnqs_per_lif) {
2423 		err = ionic_lif_notifyq_init(lif);
2424 		if (err)
2425 			goto err_out_notifyq_deinit;
2426 	}
2427 
2428 	err = ionic_init_nic_features(lif);
2429 	if (err)
2430 		goto err_out_notifyq_deinit;
2431 
2432 	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2433 		err = ionic_rx_filters_init(lif);
2434 		if (err)
2435 			goto err_out_notifyq_deinit;
2436 	}
2437 
2438 	err = ionic_station_set(lif);
2439 	if (err)
2440 		goto err_out_notifyq_deinit;
2441 
2442 	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
2443 
2444 	set_bit(IONIC_LIF_F_INITED, lif->state);
2445 
2446 	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
2447 
2448 	return 0;
2449 
2450 err_out_notifyq_deinit:
2451 	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2452 err_out_adminq_deinit:
2453 	ionic_lif_qcq_deinit(lif, lif->adminqcq);
2454 	ionic_lif_reset(lif);
2455 	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2456 	lif->kern_dbpage = NULL;
2457 err_out_free_dbid:
2458 	kfree(lif->dbid_inuse);
2459 	lif->dbid_inuse = NULL;
2460 
2461 	return err;
2462 }
2463 
2464 int ionic_lifs_init(struct ionic *ionic)
2465 {
2466 	struct list_head *cur, *tmp;
2467 	struct ionic_lif *lif;
2468 	int err;
2469 
2470 	list_for_each_safe(cur, tmp, &ionic->lifs) {
2471 		lif = list_entry(cur, struct ionic_lif, list);
2472 		err = ionic_lif_init(lif);
2473 		if (err)
2474 			return err;
2475 	}
2476 
2477 	return 0;
2478 }
2479 
2480 static void ionic_lif_notify_work(struct work_struct *ws)
2481 {
2482 }
2483 
2484 static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
2485 {
2486 	struct ionic_admin_ctx ctx = {
2487 		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2488 		.cmd.lif_setattr = {
2489 			.opcode = IONIC_CMD_LIF_SETATTR,
2490 			.index = cpu_to_le16(lif->index),
2491 			.attr = IONIC_LIF_ATTR_NAME,
2492 		},
2493 	};
2494 
2495 	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
2496 		sizeof(ctx.cmd.lif_setattr.name));
2497 
2498 	ionic_adminq_post_wait(lif, &ctx);
2499 }
2500 
2501 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
2502 {
2503 	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
2504 		return NULL;
2505 
2506 	return netdev_priv(netdev);
2507 }
2508 
2509 static int ionic_lif_notify(struct notifier_block *nb,
2510 			    unsigned long event, void *info)
2511 {
2512 	struct net_device *ndev = netdev_notifier_info_to_dev(info);
2513 	struct ionic *ionic = container_of(nb, struct ionic, nb);
2514 	struct ionic_lif *lif = ionic_netdev_lif(ndev);
2515 
2516 	if (!lif || lif->ionic != ionic)
2517 		return NOTIFY_DONE;
2518 
2519 	switch (event) {
2520 	case NETDEV_CHANGENAME:
2521 		ionic_lif_set_netdev_info(lif);
2522 		break;
2523 	}
2524 
2525 	return NOTIFY_DONE;
2526 }
2527 
2528 int ionic_lifs_register(struct ionic *ionic)
2529 {
2530 	int err;
2531 
2532 	/* the netdev is not registered on the management device, it is
2533 	 * only used as a vehicle for napi operations on the adminq
2534 	 */
2535 	if (ionic->is_mgmt_nic)
2536 		return 0;
2537 
2538 	INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
2539 
2540 	ionic->nb.notifier_call = ionic_lif_notify;
2541 
2542 	err = register_netdevice_notifier(&ionic->nb);
2543 	if (err)
2544 		ionic->nb.notifier_call = NULL;
2545 
2546 	/* only register LIF0 for now */
2547 	err = register_netdev(ionic->master_lif->netdev);
2548 	if (err) {
2549 		dev_err(ionic->dev, "Cannot register net device, aborting\n");
2550 		return err;
2551 	}
2552 
2553 	ionic_link_status_check_request(ionic->master_lif);
2554 	ionic->master_lif->registered = true;
2555 
2556 	return 0;
2557 }
2558 
2559 void ionic_lifs_unregister(struct ionic *ionic)
2560 {
2561 	if (ionic->nb.notifier_call) {
2562 		unregister_netdevice_notifier(&ionic->nb);
2563 		cancel_work_sync(&ionic->nb_work);
2564 		ionic->nb.notifier_call = NULL;
2565 	}
2566 
2567 	/* There is only one lif ever registered in the
2568 	 * current model, so don't bother searching the
2569 	 * ionic->lif for candidates to unregister
2570 	 */
2571 	if (ionic->master_lif &&
2572 	    ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
2573 		unregister_netdev(ionic->master_lif->netdev);
2574 }
2575 
2576 int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
2577 		       union ionic_lif_identity *lid)
2578 {
2579 	struct ionic_dev *idev = &ionic->idev;
2580 	size_t sz;
2581 	int err;
2582 
2583 	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
2584 
2585 	mutex_lock(&ionic->dev_cmd_lock);
2586 	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
2587 	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
2588 	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
2589 	mutex_unlock(&ionic->dev_cmd_lock);
2590 	if (err)
2591 		return (err);
2592 
2593 	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
2594 		le64_to_cpu(lid->capabilities));
2595 
2596 	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
2597 		le32_to_cpu(lid->eth.max_ucast_filters));
2598 	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
2599 		le32_to_cpu(lid->eth.max_mcast_filters));
2600 	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
2601 		le64_to_cpu(lid->eth.config.features));
2602 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
2603 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
2604 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
2605 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
2606 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
2607 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
2608 	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
2609 		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
2610 	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
2611 	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
2612 	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
2613 		le32_to_cpu(lid->eth.config.mtu));
2614 
2615 	return 0;
2616 }
2617 
2618 int ionic_lifs_size(struct ionic *ionic)
2619 {
2620 	struct ionic_identity *ident = &ionic->ident;
2621 	unsigned int nintrs, dev_nintrs;
2622 	union ionic_lif_config *lc;
2623 	unsigned int ntxqs_per_lif;
2624 	unsigned int nrxqs_per_lif;
2625 	unsigned int neqs_per_lif;
2626 	unsigned int nnqs_per_lif;
2627 	unsigned int nxqs, neqs;
2628 	unsigned int min_intrs;
2629 	int err;
2630 
2631 	lc = &ident->lif.eth.config;
2632 	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
2633 	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
2634 	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
2635 	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
2636 	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
2637 
2638 	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
2639 	nxqs = min(nxqs, num_online_cpus());
2640 	neqs = min(neqs_per_lif, num_online_cpus());
2641 
2642 try_again:
2643 	/* interrupt usage:
2644 	 *    1 for master lif adminq/notifyq
2645 	 *    1 for each CPU for master lif TxRx queue pairs
2646 	 *    whatever's left is for RDMA queues
2647 	 */
2648 	nintrs = 1 + nxqs + neqs;
2649 	min_intrs = 2;  /* adminq + 1 TxRx queue pair */
2650 
2651 	if (nintrs > dev_nintrs)
2652 		goto try_fewer;
2653 
2654 	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
2655 	if (err < 0 && err != -ENOSPC) {
2656 		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
2657 		return err;
2658 	}
2659 	if (err == -ENOSPC)
2660 		goto try_fewer;
2661 
2662 	if (err != nintrs) {
2663 		ionic_bus_free_irq_vectors(ionic);
2664 		goto try_fewer;
2665 	}
2666 
2667 	ionic->nnqs_per_lif = nnqs_per_lif;
2668 	ionic->neqs_per_lif = neqs;
2669 	ionic->ntxqs_per_lif = nxqs;
2670 	ionic->nrxqs_per_lif = nxqs;
2671 	ionic->nintrs = nintrs;
2672 
2673 	ionic_debugfs_add_sizes(ionic);
2674 
2675 	return 0;
2676 
2677 try_fewer:
2678 	if (nnqs_per_lif > 1) {
2679 		nnqs_per_lif >>= 1;
2680 		goto try_again;
2681 	}
2682 	if (neqs > 1) {
2683 		neqs >>= 1;
2684 		goto try_again;
2685 	}
2686 	if (nxqs > 1) {
2687 		nxqs >>= 1;
2688 		goto try_again;
2689 	}
2690 	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
2691 	return -ENOSPC;
2692 }
2693