xref: /openbmc/linux/net/ncsi/ncsi-manage.c (revision 48d54403)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright Gavin Shan, IBM Corporation 2016.
4  */
5 
6 #include <linux/module.h>
7 #include <linux/kernel.h>
8 #include <linux/init.h>
9 #include <linux/netdevice.h>
10 #include <linux/skbuff.h>
11 #include <linux/of.h>
12 #include <linux/platform_device.h>
13 
14 #include <net/ncsi.h>
15 #include <net/net_namespace.h>
16 #include <net/sock.h>
17 #include <net/addrconf.h>
18 #include <net/ipv6.h>
19 #include <net/genetlink.h>
20 
21 #include "internal.h"
22 #include "ncsi-pkt.h"
23 #include "ncsi-netlink.h"
24 
25 LIST_HEAD(ncsi_dev_list);
26 DEFINE_SPINLOCK(ncsi_dev_lock);
27 
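/* Return true if the channel's cached link state (NCSI_MODE_LINK)
 * reports link up.
 */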
28 bool ncsi_channel_has_link(struct ncsi_channel *channel)
29 {
30 	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
31 }
32 
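/* Return true if no other channel on the device is active with link up,
 * i.e. @channel is the last one carrying the link.
 */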
33 bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
34 			  struct ncsi_channel *channel)
35 {
36 	struct ncsi_package *np;
37 	struct ncsi_channel *nc;
38 
39 	NCSI_FOR_EACH_PACKAGE(ndp, np)
40 		NCSI_FOR_EACH_CHANNEL(np, nc) {
41 			if (nc == channel)
42 				continue;
43 			if (nc->state == NCSI_CHANNEL_ACTIVE &&
44 			    ncsi_channel_has_link(nc))
45 				return false;
46 		}
47 
48 	return true;
49 }
50 
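/* Report the aggregate link state to the NCSI device handler. Unless
 * @force_down is set, the link is reported up if any active channel
 * that is not sitting on the channel queue has link.
 */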
51 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
52 {
53 	struct ncsi_dev *nd = &ndp->ndev;
54 	struct ncsi_package *np;
55 	struct ncsi_channel *nc;
56 	unsigned long flags;
57 
58 	nd->state = ncsi_dev_state_functional;
59 	if (force_down) {
60 		nd->link_up = 0;
61 		goto report;
62 	}
63 
64 	nd->link_up = 0;
65 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
66 		NCSI_FOR_EACH_CHANNEL(np, nc) {
67 			spin_lock_irqsave(&nc->lock, flags);
68 
69 			if (!list_empty(&nc->link) ||
70 			    nc->state != NCSI_CHANNEL_ACTIVE) {
71 				spin_unlock_irqrestore(&nc->lock, flags);
72 				continue;
73 			}
74 
75 			if (ncsi_channel_has_link(nc)) {
76 				spin_unlock_irqrestore(&nc->lock, flags);
77 				nd->link_up = 1;
78 				goto report;
79 			}
80 
81 			spin_unlock_irqrestore(&nc->lock, flags);
82 		}
83 	}
84 
85 report:
86 	nd->handler(nd);
87 }
88 
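/* Per-channel monitor timer. It periodically sends a Get Link Status
 * (GLS) command and rearms itself; if no response arrives before the
 * retry window expires, the channel is reported as failed and requeued
 * for reconfiguration.
 */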
89 static void ncsi_channel_monitor(struct timer_list *t)
90 {
91 	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
92 	struct ncsi_package *np = nc->package;
93 	struct ncsi_dev_priv *ndp = np->ndp;
94 	struct ncsi_channel_mode *ncm;
95 	struct ncsi_cmd_arg nca;
96 	bool enabled, chained;
97 	unsigned int monitor_state;
98 	unsigned long flags;
99 	int state, ret;
100 
101 	spin_lock_irqsave(&nc->lock, flags);
102 	state = nc->state;
103 	chained = !list_empty(&nc->link);
104 	enabled = nc->monitor.enabled;
105 	monitor_state = nc->monitor.state;
106 	spin_unlock_irqrestore(&nc->lock, flags);
107 
108 	if (!enabled)
109 		return;		/* expected race disabling timer */
110 	if (WARN_ON_ONCE(chained))
111 		goto bad_state;
112 
113 	if (state != NCSI_CHANNEL_INACTIVE &&
114 	    state != NCSI_CHANNEL_ACTIVE) {
115 bad_state:
116 		netdev_warn(ndp->ndev.dev,
117 			    "Bad NCSI monitor state channel %d 0x%x %s queue\n",
118 			    nc->id, state, chained ? "on" : "off");
119 		spin_lock_irqsave(&nc->lock, flags);
120 		nc->monitor.enabled = false;
121 		spin_unlock_irqrestore(&nc->lock, flags);
122 		return;
123 	}
124 
125 	switch (monitor_state) {
126 	case NCSI_CHANNEL_MONITOR_START:
127 	case NCSI_CHANNEL_MONITOR_RETRY:
128 		nca.ndp = ndp;
129 		nca.package = np->id;
130 		nca.channel = nc->id;
131 		nca.type = NCSI_PKT_CMD_GLS;
132 		nca.req_flags = 0;
133 		ret = ncsi_xmit_cmd(&nca);
134 		if (ret)
135 			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
136 				   ret);
137 		break;
138 	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
139 		break;
140 	default:
141 		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
142 			   nc->id);
143 		ncsi_report_link(ndp, true);
144 		ndp->flags |= NCSI_DEV_RESHUFFLE;
145 
146 		ncm = &nc->modes[NCSI_MODE_LINK];
147 		spin_lock_irqsave(&nc->lock, flags);
148 		nc->monitor.enabled = false;
149 		nc->state = NCSI_CHANNEL_INVISIBLE;
150 		ncm->data[2] &= ~0x1;
151 		spin_unlock_irqrestore(&nc->lock, flags);
152 
153 		spin_lock_irqsave(&ndp->lock, flags);
154 		nc->state = NCSI_CHANNEL_ACTIVE;
155 		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
156 		spin_unlock_irqrestore(&ndp->lock, flags);
157 		ncsi_process_next_channel(ndp);
158 		return;
159 	}
160 
161 	spin_lock_irqsave(&nc->lock, flags);
162 	nc->monitor.state++;
163 	spin_unlock_irqrestore(&nc->lock, flags);
164 	mod_timer(&nc->monitor.timer, jiffies + HZ);
165 }
166 
167 void ncsi_start_channel_monitor(struct ncsi_channel *nc)
168 {
169 	unsigned long flags;
170 
171 	spin_lock_irqsave(&nc->lock, flags);
172 	WARN_ON_ONCE(nc->monitor.enabled);
173 	nc->monitor.enabled = true;
174 	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
175 	spin_unlock_irqrestore(&nc->lock, flags);
176 
177 	mod_timer(&nc->monitor.timer, jiffies + HZ);
178 }
179 
180 void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
181 {
182 	unsigned long flags;
183 
184 	spin_lock_irqsave(&nc->lock, flags);
185 	if (!nc->monitor.enabled) {
186 		spin_unlock_irqrestore(&nc->lock, flags);
187 		return;
188 	}
189 	nc->monitor.enabled = false;
190 	spin_unlock_irqrestore(&nc->lock, flags);
191 
192 	del_timer_sync(&nc->monitor.timer);
193 }
194 
195 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
196 				       unsigned char id)
197 {
198 	struct ncsi_channel *nc;
199 
200 	NCSI_FOR_EACH_CHANNEL(np, nc) {
201 		if (nc->id == id)
202 			return nc;
203 	}
204 
205 	return NULL;
206 }
207 
208 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
209 {
210 	struct ncsi_channel *nc, *tmp;
211 	int index;
212 	unsigned long flags;
213 
214 	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
215 	if (!nc)
216 		return NULL;
217 
218 	nc->id = id;
219 	nc->package = np;
220 	nc->state = NCSI_CHANNEL_INACTIVE;
221 	nc->monitor.enabled = false;
222 	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
223 	spin_lock_init(&nc->lock);
224 	INIT_LIST_HEAD(&nc->link);
225 	for (index = 0; index < NCSI_CAP_MAX; index++)
226 		nc->caps[index].index = index;
227 	for (index = 0; index < NCSI_MODE_MAX; index++)
228 		nc->modes[index].index = index;
229 
230 	spin_lock_irqsave(&np->lock, flags);
231 	tmp = ncsi_find_channel(np, id);
232 	if (tmp) {
233 		spin_unlock_irqrestore(&np->lock, flags);
234 		kfree(nc);
235 		return tmp;
236 	}
237 
238 	list_add_tail_rcu(&nc->node, &np->channels);
239 	np->channel_num++;
240 	spin_unlock_irqrestore(&np->lock, flags);
241 
242 	return nc;
243 }
244 
245 static void ncsi_remove_channel(struct ncsi_channel *nc)
246 {
247 	struct ncsi_package *np = nc->package;
248 	unsigned long flags;
249 
250 	spin_lock_irqsave(&nc->lock, flags);
251 
252 	/* Release filters */
253 	kfree(nc->mac_filter.addrs);
254 	kfree(nc->vlan_filter.vids);
255 
256 	nc->state = NCSI_CHANNEL_INACTIVE;
257 	spin_unlock_irqrestore(&nc->lock, flags);
258 	ncsi_stop_channel_monitor(nc);
259 
260 	/* Remove and free channel */
261 	spin_lock_irqsave(&np->lock, flags);
262 	list_del_rcu(&nc->node);
263 	np->channel_num--;
264 	spin_unlock_irqrestore(&np->lock, flags);
265 
266 	kfree(nc);
267 }
268 
269 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
270 				       unsigned char id)
271 {
272 	struct ncsi_package *np;
273 
274 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
275 		if (np->id == id)
276 			return np;
277 	}
278 
279 	return NULL;
280 }
281 
282 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
283 				      unsigned char id)
284 {
285 	struct ncsi_package *np, *tmp;
286 	unsigned long flags;
287 
288 	np = kzalloc(sizeof(*np), GFP_ATOMIC);
289 	if (!np)
290 		return NULL;
291 
292 	np->id = id;
293 	np->ndp = ndp;
294 	spin_lock_init(&np->lock);
295 	INIT_LIST_HEAD(&np->channels);
296 	np->channel_whitelist = UINT_MAX;
297 
298 	spin_lock_irqsave(&ndp->lock, flags);
299 	tmp = ncsi_find_package(ndp, id);
300 	if (tmp) {
301 		spin_unlock_irqrestore(&ndp->lock, flags);
302 		kfree(np);
303 		return tmp;
304 	}
305 
306 	list_add_tail_rcu(&np->node, &ndp->packages);
307 	ndp->package_num++;
308 	spin_unlock_irqrestore(&ndp->lock, flags);
309 
310 	return np;
311 }
312 
313 void ncsi_remove_package(struct ncsi_package *np)
314 {
315 	struct ncsi_dev_priv *ndp = np->ndp;
316 	struct ncsi_channel *nc, *tmp;
317 	unsigned long flags;
318 
319 	/* Release all child channels */
320 	list_for_each_entry_safe(nc, tmp, &np->channels, node)
321 		ncsi_remove_channel(nc);
322 
323 	/* Remove and free package */
324 	spin_lock_irqsave(&ndp->lock, flags);
325 	list_del_rcu(&np->node);
326 	ndp->package_num--;
327 	spin_unlock_irqrestore(&ndp->lock, flags);
328 
329 	kfree(np);
330 }
331 
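/* Resolve the package and channel encoded in an NCSI channel identifier.
 * Either of @np and @nc may be NULL if the caller doesn't need it.
 */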
332 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
333 				   unsigned char id,
334 				   struct ncsi_package **np,
335 				   struct ncsi_channel **nc)
336 {
337 	struct ncsi_package *p;
338 	struct ncsi_channel *c;
339 
340 	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
341 	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
342 
343 	if (np)
344 		*np = p;
345 	if (nc)
346 		*nc = c;
347 }
348 
349 /* For two consecutive NCSI commands, the packet IDs shouldn't be the
350  * same. Otherwise, a stale response might be matched to the wrong
351  * request. So the available IDs are allocated in round-robin fashion.
352  */
353 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
354 					unsigned int req_flags)
355 {
356 	struct ncsi_request *nr = NULL;
357 	int i, limit = ARRAY_SIZE(ndp->requests);
358 	unsigned long flags;
359 
360 	/* Check if there is one available request until the ceiling */
361 	spin_lock_irqsave(&ndp->lock, flags);
362 	for (i = ndp->request_id; i < limit; i++) {
363 		if (ndp->requests[i].used)
364 			continue;
365 
366 		nr = &ndp->requests[i];
367 		nr->used = true;
368 		nr->flags = req_flags;
369 		ndp->request_id = i + 1;
370 		goto found;
371 	}
372 
373 	/* Fall back and scan again from the starting cursor */
374 	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
375 		if (ndp->requests[i].used)
376 			continue;
377 
378 		nr = &ndp->requests[i];
379 		nr->used = true;
380 		nr->flags = req_flags;
381 		ndp->request_id = i + 1;
382 		goto found;
383 	}
384 
385 found:
386 	spin_unlock_irqrestore(&ndp->lock, flags);
387 	return nr;
388 }
389 
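/* Return a request slot to the pool and release its command/response
 * skbs. For event-driven requests, kick the workqueue once the last
 * outstanding command has completed.
 */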
390 void ncsi_free_request(struct ncsi_request *nr)
391 {
392 	struct ncsi_dev_priv *ndp = nr->ndp;
393 	struct sk_buff *cmd, *rsp;
394 	unsigned long flags;
395 	bool driven;
396 
397 	if (nr->enabled) {
398 		nr->enabled = false;
399 		del_timer_sync(&nr->timer);
400 	}
401 
402 	spin_lock_irqsave(&ndp->lock, flags);
403 	cmd = nr->cmd;
404 	rsp = nr->rsp;
405 	nr->cmd = NULL;
406 	nr->rsp = NULL;
407 	nr->used = false;
408 	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
409 	spin_unlock_irqrestore(&ndp->lock, flags);
410 
411 	if (driven && cmd && --ndp->pending_req_num == 0)
412 		schedule_work(&ndp->work);
413 
414 	/* Release command and response */
415 	consume_skb(cmd);
416 	consume_skb(rsp);
417 }
418 
419 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
420 {
421 	struct ncsi_dev_priv *ndp;
422 
423 	NCSI_FOR_EACH_DEV(ndp) {
424 		if (ndp->ndev.dev == dev)
425 			return &ndp->ndev;
426 	}
427 
428 	return NULL;
429 }
430 
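/* Timer callback fired when a command gets no response in time.
 * Netlink-driven requests report the timeout to userspace; the request
 * is then released.
 */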
431 static void ncsi_request_timeout(struct timer_list *t)
432 {
433 	struct ncsi_request *nr = from_timer(nr, t, timer);
434 	struct ncsi_dev_priv *ndp = nr->ndp;
435 	struct ncsi_cmd_pkt *cmd;
436 	struct ncsi_package *np;
437 	struct ncsi_channel *nc;
438 	unsigned long flags;
439 
440 	/* If the request already has an associated response,
441 	 * let the response handler release it.
442 	 */
443 	spin_lock_irqsave(&ndp->lock, flags);
444 	nr->enabled = false;
445 	if (nr->rsp || !nr->cmd) {
446 		spin_unlock_irqrestore(&ndp->lock, flags);
447 		return;
448 	}
449 	spin_unlock_irqrestore(&ndp->lock, flags);
450 
451 	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
452 		if (nr->cmd) {
453 			/* Find the package */
454 			cmd = (struct ncsi_cmd_pkt *)
455 			      skb_network_header(nr->cmd);
456 			ncsi_find_package_and_channel(ndp,
457 						      cmd->cmd.common.channel,
458 						      &np, &nc);
459 			ncsi_send_netlink_timeout(nr, np, nc);
460 		}
461 	}
462 
463 	/* Release the request */
464 	ncsi_free_request(nr);
465 }
466 
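/* State machine that takes the active channel down: select its package,
 * optionally refresh the link states (GLS) when a reshuffle is pending,
 * disable the channel's Tx and the channel itself (DCNT/DC), and
 * deselect the package unless another channel on it is still active.
 */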
467 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
468 {
469 	struct ncsi_dev *nd = &ndp->ndev;
470 	struct ncsi_package *np;
471 	struct ncsi_channel *nc, *tmp;
472 	struct ncsi_cmd_arg nca;
473 	unsigned long flags;
474 	int ret;
475 
476 	np = ndp->active_package;
477 	nc = ndp->active_channel;
478 	nca.ndp = ndp;
479 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
480 	switch (nd->state) {
481 	case ncsi_dev_state_suspend:
482 		nd->state = ncsi_dev_state_suspend_select;
483 		fallthrough;
484 	case ncsi_dev_state_suspend_select:
485 		ndp->pending_req_num = 1;
486 
487 		nca.type = NCSI_PKT_CMD_SP;
488 		nca.package = np->id;
489 		nca.channel = NCSI_RESERVED_CHANNEL;
490 		if (ndp->flags & NCSI_DEV_HWA)
491 			nca.bytes[0] = 0;
492 		else
493 			nca.bytes[0] = 1;
494 
495 		/* Retrieve the last link states of the channels in the current
496 		 * package when the active channel needs to fail over to another
497 		 * one, since we may select another channel as the next active
498 		 * one. The channels' link states are the most important factor
499 		 * in that selection, so they need to be accurate. Unfortunately,
500 		 * the link states of inactive channels can't be updated by LSC
501 		 * AENs in time.
502 		 */
503 		if (ndp->flags & NCSI_DEV_RESHUFFLE)
504 			nd->state = ncsi_dev_state_suspend_gls;
505 		else
506 			nd->state = ncsi_dev_state_suspend_dcnt;
507 		ret = ncsi_xmit_cmd(&nca);
508 		if (ret)
509 			goto error;
510 
511 		break;
512 	case ncsi_dev_state_suspend_gls:
513 		ndp->pending_req_num = np->channel_num;
514 
515 		nca.type = NCSI_PKT_CMD_GLS;
516 		nca.package = np->id;
517 
518 		nd->state = ncsi_dev_state_suspend_dcnt;
519 		NCSI_FOR_EACH_CHANNEL(np, nc) {
520 			nca.channel = nc->id;
521 			ret = ncsi_xmit_cmd(&nca);
522 			if (ret)
523 				goto error;
524 		}
525 
526 		break;
527 	case ncsi_dev_state_suspend_dcnt:
528 		ndp->pending_req_num = 1;
529 
530 		nca.type = NCSI_PKT_CMD_DCNT;
531 		nca.package = np->id;
532 		nca.channel = nc->id;
533 
534 		nd->state = ncsi_dev_state_suspend_dc;
535 		ret = ncsi_xmit_cmd(&nca);
536 		if (ret)
537 			goto error;
538 
539 		break;
540 	case ncsi_dev_state_suspend_dc:
541 		ndp->pending_req_num = 1;
542 
543 		nca.type = NCSI_PKT_CMD_DC;
544 		nca.package = np->id;
545 		nca.channel = nc->id;
546 		nca.bytes[0] = 1;
547 
548 		nd->state = ncsi_dev_state_suspend_deselect;
549 		ret = ncsi_xmit_cmd(&nca);
550 		if (ret)
551 			goto error;
552 
553 		NCSI_FOR_EACH_CHANNEL(np, tmp) {
554 			/* If there is another channel active on this package
555 			 * do not deselect the package.
556 			 */
557 			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
558 				nd->state = ncsi_dev_state_suspend_done;
559 				break;
560 			}
561 		}
562 		break;
563 	case ncsi_dev_state_suspend_deselect:
564 		ndp->pending_req_num = 1;
565 
566 		nca.type = NCSI_PKT_CMD_DP;
567 		nca.package = np->id;
568 		nca.channel = NCSI_RESERVED_CHANNEL;
569 
570 		nd->state = ncsi_dev_state_suspend_done;
571 		ret = ncsi_xmit_cmd(&nca);
572 		if (ret)
573 			goto error;
574 
575 		break;
576 	case ncsi_dev_state_suspend_done:
577 		spin_lock_irqsave(&nc->lock, flags);
578 		nc->state = NCSI_CHANNEL_INACTIVE;
579 		spin_unlock_irqrestore(&nc->lock, flags);
580 		if (ndp->flags & NCSI_DEV_RESET)
581 			ncsi_reset_dev(nd);
582 		else
583 			ncsi_process_next_channel(ndp);
584 		break;
585 	default:
586 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
587 			    nd->state);
588 	}
589 
590 	return;
591 error:
592 	nd->state = ncsi_dev_state_functional;
593 }
594 
595 /* Check the VLAN filter bitmap for a set filter, and construct a
596  * "Set VLAN Filter - Disable" packet if found.
597  */
598 static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
599 			 struct ncsi_cmd_arg *nca)
600 {
601 	struct ncsi_channel_vlan_filter *ncf;
602 	unsigned long flags;
603 	void *bitmap;
604 	int index;
605 	u16 vid;
606 
607 	ncf = &nc->vlan_filter;
608 	bitmap = &ncf->bitmap;
609 
610 	spin_lock_irqsave(&nc->lock, flags);
611 	index = find_next_bit(bitmap, ncf->n_vids, 0);
612 	if (index >= ncf->n_vids) {
613 		spin_unlock_irqrestore(&nc->lock, flags);
614 		return -1;
615 	}
616 	vid = ncf->vids[index];
617 
618 	clear_bit(index, bitmap);
619 	ncf->vids[index] = 0;
620 	spin_unlock_irqrestore(&nc->lock, flags);
621 
622 	nca->type = NCSI_PKT_CMD_SVF;
623 	nca->words[1] = vid;
624 	/* HW filter index starts at 1 */
625 	nca->bytes[6] = index + 1;
626 	nca->bytes[7] = 0x00;
627 	return 0;
628 }
629 
630 /* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
631  * packet.
632  */
633 static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
634 		       struct ncsi_cmd_arg *nca)
635 {
636 	struct ncsi_channel_vlan_filter *ncf;
637 	struct vlan_vid *vlan = NULL;
638 	unsigned long flags;
639 	int i, index;
640 	void *bitmap;
641 	u16 vid;
642 
643 	if (list_empty(&ndp->vlan_vids))
644 		return -1;
645 
646 	ncf = &nc->vlan_filter;
647 	bitmap = &ncf->bitmap;
648 
649 	spin_lock_irqsave(&nc->lock, flags);
650 
651 	rcu_read_lock();
652 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
653 		vid = vlan->vid;
654 		for (i = 0; i < ncf->n_vids; i++)
655 			if (ncf->vids[i] == vid) {
656 				vid = 0;
657 				break;
658 			}
659 		if (vid)
660 			break;
661 	}
662 	rcu_read_unlock();
663 
664 	if (!vid) {
665 		/* Every VLAN ID is already in the filter; nothing to set */
666 		spin_unlock_irqrestore(&nc->lock, flags);
667 		return -1;
668 	}
669 
670 	index = find_next_zero_bit(bitmap, ncf->n_vids, 0);
671 	if (index < 0 || index >= ncf->n_vids) {
672 		netdev_err(ndp->ndev.dev,
673 			   "Channel %u already has all VLAN filters set\n",
674 			   nc->id);
675 		spin_unlock_irqrestore(&nc->lock, flags);
676 		return -1;
677 	}
678 
679 	ncf->vids[index] = vid;
680 	set_bit(index, bitmap);
681 	spin_unlock_irqrestore(&nc->lock, flags);
682 
683 	nca->type = NCSI_PKT_CMD_SVF;
684 	nca->words[1] = vid;
685 	/* HW filter index starts at 1 */
686 	nca->bytes[6] = index + 1;
687 	nca->bytes[7] = 0x01;
688 
689 	return 0;
690 }
691 
692 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
693 
694 static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
695 {
696 	unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
697 	int ret = 0;
698 
699 	nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN;
700 
701 	memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN);
702 	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
703 
704 	data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;
705 
706 	/* PHY Link up attribute */
707 	data[6] = 0x1;
708 
709 	nca->data = data;
710 
711 	ret = ncsi_xmit_cmd(nca);
712 	if (ret)
713 		netdev_err(nca->ndp->ndev.dev,
714 			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
715 			   nca->type);
716 	return ret;
717 }
718 
719 #endif
720 
721 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
722 
723 /* NCSI OEM Command APIs */
724 static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
725 {
726 	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
727 	int ret = 0;
728 
729 	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
730 
731 	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
732 	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID);
733 	data[5] = NCSI_OEM_BCM_CMD_GMA;
734 
735 	nca->data = data;
736 
737 	ret = ncsi_xmit_cmd(nca);
738 	if (ret)
739 		netdev_err(nca->ndp->ndev.dev,
740 			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
741 			   nca->type);
742 	return ret;
743 }
744 
745 static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
746 {
747 	union {
748 		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
749 		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
750 	} u;
751 	int ret = 0;
752 
753 	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
754 
755 	memset(&u, 0, sizeof(u));
756 	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
757 	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
758 	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
759 
760 	nca->data = u.data_u8;
761 
762 	ret = ncsi_xmit_cmd(nca);
763 	if (ret)
764 		netdev_err(nca->ndp->ndev.dev,
765 			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
766 			   nca->type);
767 	return ret;
768 }
769 
770 static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
771 {
772 	union {
773 		u8 data_u8[NCSI_OEM_MLX_CMD_SMAF_LEN];
774 		u32 data_u32[NCSI_OEM_MLX_CMD_SMAF_LEN / sizeof(u32)];
775 	} u;
776 	int ret = 0;
777 
778 	memset(&u, 0, sizeof(u));
779 	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
780 	u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
781 	u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
782 	memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
783 	       nca->ndp->ndev.dev->dev_addr,	ETH_ALEN);
784 	u.data_u8[MLX_SMAF_MED_SUPPORT_OFFSET] =
785 		(MLX_MC_RBT_AVL | MLX_MC_RBT_SUPPORT);
786 
787 	nca->payload = NCSI_OEM_MLX_CMD_SMAF_LEN;
788 	nca->data = u.data_u8;
789 
790 	ret = ncsi_xmit_cmd(nca);
791 	if (ret)
792 		netdev_err(nca->ndp->ndev.dev,
793 			   "NCSI: Failed to transmit cmd 0x%x during probe\n",
794 			   nca->type);
795 	return ret;
796 }
797 
798 /* OEM Command handlers initialization */
799 static struct ncsi_oem_gma_handler {
800 	unsigned int	mfr_id;
801 	int		(*handler)(struct ncsi_cmd_arg *nca);
802 } ncsi_oem_gma_handlers[] = {
803 	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
804 	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx }
805 };
806 
807 static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
808 {
809 	struct ncsi_oem_gma_handler *nch = NULL;
810 	int i;
811 
812 	/* This function should only be called once, return if flag set */
813 	if (nca->ndp->gma_flag == 1)
814 		return -1;
815 
816 	/* Find gma handler for given manufacturer id */
817 	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
818 		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
819 			if (ncsi_oem_gma_handlers[i].handler)
820 				nch = &ncsi_oem_gma_handlers[i];
821 			break;
822 		}
823 	}
824 
825 	if (!nch) {
826 		netdev_err(nca->ndp->ndev.dev,
827 			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
828 			   mf_id);
829 		return -1;
830 	}
831 
832 	/* Get Mac address from NCSI device */
833 	return nch->handler(nca);
834 }
835 
836 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
837 
838 /* Determine if a given channel from the channel_queue should be used for Tx */
839 static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
840 			       struct ncsi_channel *nc)
841 {
842 	struct ncsi_channel_mode *ncm;
843 	struct ncsi_channel *channel;
844 	struct ncsi_package *np;
845 
846 	/* Check if any other channel has Tx enabled; a channel may have already
847 	 * been configured and removed from the channel queue.
848 	 */
849 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
850 		if (!ndp->multi_package && np != nc->package)
851 			continue;
852 		NCSI_FOR_EACH_CHANNEL(np, channel) {
853 			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
854 			if (ncm->enable)
855 				return false;
856 		}
857 	}
858 
859 	/* If a package's preferred channel has link, Tx belongs to that channel */
860 	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
861 		np = channel->package;
862 		if (np->preferred_channel &&
863 		    ncsi_channel_has_link(np->preferred_channel)) {
864 			return np->preferred_channel == nc;
865 		}
866 	}
867 
868 	/* This channel has link */
869 	if (ncsi_channel_has_link(nc))
870 		return true;
871 
872 	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
873 		if (ncsi_channel_has_link(channel))
874 			return false;
875 
876 	/* No other channel has link; default to this one */
877 	return true;
878 }
879 
880 /* Change the active Tx channel in a multi-channel setup */
881 int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
882 			   struct ncsi_package *package,
883 			   struct ncsi_channel *disable,
884 			   struct ncsi_channel *enable)
885 {
886 	struct ncsi_cmd_arg nca;
887 	struct ncsi_channel *nc;
888 	struct ncsi_package *np;
889 	int ret = 0;
890 
891 	if (!package->multi_channel && !ndp->multi_package)
892 		netdev_warn(ndp->ndev.dev,
893 			    "NCSI: Trying to update Tx channel in single-channel mode\n");
894 	nca.ndp = ndp;
895 	nca.req_flags = 0;
896 
897 	/* Find current channel with Tx enabled */
898 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
899 		if (disable)
900 			break;
901 		if (!ndp->multi_package && np != package)
902 			continue;
903 
904 		NCSI_FOR_EACH_CHANNEL(np, nc)
905 			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
906 				disable = nc;
907 				break;
908 			}
909 	}
910 
911 	/* Find a suitable channel for Tx */
912 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
913 		if (enable)
914 			break;
915 		if (!ndp->multi_package && np != package)
916 			continue;
917 		if (!(ndp->package_whitelist & (0x1 << np->id)))
918 			continue;
919 
920 		if (np->preferred_channel &&
921 		    ncsi_channel_has_link(np->preferred_channel)) {
922 			enable = np->preferred_channel;
923 			break;
924 		}
925 
926 		NCSI_FOR_EACH_CHANNEL(np, nc) {
927 			if (!(np->channel_whitelist & 0x1 << nc->id))
928 				continue;
929 			if (nc->state != NCSI_CHANNEL_ACTIVE)
930 				continue;
931 			if (ncsi_channel_has_link(nc)) {
932 				enable = nc;
933 				break;
934 			}
935 		}
936 	}
937 
938 	if (disable == enable)
939 		return -1;
940 
941 	if (!enable)
942 		return -1;
943 
944 	if (disable) {
945 		nca.channel = disable->id;
946 		nca.package = disable->package->id;
947 		nca.type = NCSI_PKT_CMD_DCNT;
948 		ret = ncsi_xmit_cmd(&nca);
949 		if (ret)
950 			netdev_err(ndp->ndev.dev,
951 				   "Error %d sending DCNT\n",
952 				   ret);
953 	}
954 
955 	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
956 
957 	nca.channel = enable->id;
958 	nca.package = enable->package->id;
959 	nca.type = NCSI_PKT_CMD_ECNT;
960 	ret = ncsi_xmit_cmd(&nca);
961 	if (ret)
962 		netdev_err(ndp->ndev.dev,
963 			   "Error %d sending ECNT\n",
964 			   ret);
965 
966 	return ret;
967 }
968 
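/* State machine that brings the active channel up: select the package,
 * clear the channel's initial state, optionally retrieve the MAC address
 * via an OEM command, program VLAN and MAC filters, enable broadcast
 * filtering and AENs, enable the channel (and Tx when this channel is
 * chosen for it) and finish with a link status query.
 */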
969 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
970 {
971 	struct ncsi_package *np = ndp->active_package;
972 	struct ncsi_channel *nc = ndp->active_channel;
973 	struct ncsi_channel *hot_nc = NULL;
974 	struct ncsi_dev *nd = &ndp->ndev;
975 	struct net_device *dev = nd->dev;
976 	struct ncsi_cmd_arg nca;
977 	unsigned char index;
978 	unsigned long flags;
979 	int ret;
980 
981 	nca.ndp = ndp;
982 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
983 	switch (nd->state) {
984 	case ncsi_dev_state_config:
985 	case ncsi_dev_state_config_sp:
986 		ndp->pending_req_num = 1;
987 
988 		/* Select the specific package */
989 		nca.type = NCSI_PKT_CMD_SP;
990 		if (ndp->flags & NCSI_DEV_HWA)
991 			nca.bytes[0] = 0;
992 		else
993 			nca.bytes[0] = 1;
994 		nca.package = np->id;
995 		nca.channel = NCSI_RESERVED_CHANNEL;
996 		ret = ncsi_xmit_cmd(&nca);
997 		if (ret) {
998 			netdev_err(ndp->ndev.dev,
999 				   "NCSI: Failed to transmit CMD_SP\n");
1000 			goto error;
1001 		}
1002 
1003 		nd->state = ncsi_dev_state_config_cis;
1004 		break;
1005 	case ncsi_dev_state_config_cis:
1006 		ndp->pending_req_num = 1;
1007 
1008 		/* Clear initial state */
1009 		nca.type = NCSI_PKT_CMD_CIS;
1010 		nca.package = np->id;
1011 		nca.channel = nc->id;
1012 		ret = ncsi_xmit_cmd(&nca);
1013 		if (ret) {
1014 			netdev_err(ndp->ndev.dev,
1015 				   "NCSI: Failed to transmit CMD_CIS\n");
1016 			goto error;
1017 		}
1018 
1019 		nd->state = ncsi_dev_state_config_oem_gma;
1020 		break;
1021 	case ncsi_dev_state_config_oem_gma:
1022 		nd->state = ncsi_dev_state_config_clear_vids;
1023 		ret = -1;
1024 
1025 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
1026 		nca.type = NCSI_PKT_CMD_OEM;
1027 		nca.package = np->id;
1028 		nca.channel = nc->id;
1029 		ndp->pending_req_num = 1;
1030 		ret = ncsi_gma_handler(&nca, nc->version.mf_id);
1031 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
1032 
1033 		if (ret < 0)
1034 			schedule_work(&ndp->work);
1035 
1036 		break;
1037 	case ncsi_dev_state_config_clear_vids:
1038 	case ncsi_dev_state_config_svf:
1039 	case ncsi_dev_state_config_ev:
1040 	case ncsi_dev_state_config_sma:
1041 	case ncsi_dev_state_config_ebf:
1042 	case ncsi_dev_state_config_dgmf:
1043 	case ncsi_dev_state_config_ecnt:
1044 	case ncsi_dev_state_config_ec:
1045 	case ncsi_dev_state_config_ae:
1046 	case ncsi_dev_state_config_gls:
1047 		ndp->pending_req_num = 1;
1048 
1049 		nca.package = np->id;
1050 		nca.channel = nc->id;
1051 
1052 		/* Clear any active filters on the channel before setting */
1053 		if (nd->state == ncsi_dev_state_config_clear_vids) {
1054 			ret = clear_one_vid(ndp, nc, &nca);
1055 			if (ret) {
1056 				nd->state = ncsi_dev_state_config_svf;
1057 				schedule_work(&ndp->work);
1058 				break;
1059 			}
1060 			/* Repeat */
1061 			nd->state = ncsi_dev_state_config_clear_vids;
1062 		/* Add known VLAN tags to the filter */
1063 		} else if (nd->state == ncsi_dev_state_config_svf) {
1064 			ret = set_one_vid(ndp, nc, &nca);
1065 			if (ret) {
1066 				nd->state = ncsi_dev_state_config_ev;
1067 				schedule_work(&ndp->work);
1068 				break;
1069 			}
1070 			/* Repeat */
1071 			nd->state = ncsi_dev_state_config_svf;
1072 		/* Enable/Disable the VLAN filter */
1073 		} else if (nd->state == ncsi_dev_state_config_ev) {
1074 			if (list_empty(&ndp->vlan_vids)) {
1075 				nca.type = NCSI_PKT_CMD_DV;
1076 			} else {
1077 				nca.type = NCSI_PKT_CMD_EV;
1078 				nca.bytes[3] = NCSI_CAP_VLAN_NO;
1079 			}
1080 			nd->state = ncsi_dev_state_config_sma;
1081 		} else if (nd->state == ncsi_dev_state_config_sma) {
1082 			/* Use the first entry in the unicast filter table. Note
1083 			 * that the MAC filter table starts from entry 1 instead
1084 			 * of 0.
1085 			 */
1086 			nca.type = NCSI_PKT_CMD_SMA;
1087 			for (index = 0; index < 6; index++)
1088 				nca.bytes[index] = dev->dev_addr[index];
1089 			nca.bytes[6] = 0x1;
1090 			nca.bytes[7] = 0x1;
1091 			nd->state = ncsi_dev_state_config_ebf;
1092 		} else if (nd->state == ncsi_dev_state_config_ebf) {
1093 			nca.type = NCSI_PKT_CMD_EBF;
1094 			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
1095 			/* If global multicast filtering is supported, disable
1096 			 * it so that all multicast packets are forwarded to
1097 			 * the management controller
1098 			 */
1099 			if (nc->caps[NCSI_CAP_GENERIC].cap &
1100 			    NCSI_CAP_GENERIC_MC)
1101 				nd->state = ncsi_dev_state_config_dgmf;
1102 			else if (ncsi_channel_is_tx(ndp, nc))
1103 				nd->state = ncsi_dev_state_config_ecnt;
1104 			else
1105 				nd->state = ncsi_dev_state_config_ec;
1106 		} else if (nd->state == ncsi_dev_state_config_dgmf) {
1107 			nca.type = NCSI_PKT_CMD_DGMF;
1108 			if (ncsi_channel_is_tx(ndp, nc))
1109 				nd->state = ncsi_dev_state_config_ecnt;
1110 			else
1111 				nd->state = ncsi_dev_state_config_ec;
1112 		} else if (nd->state == ncsi_dev_state_config_ecnt) {
1113 			if (np->preferred_channel &&
1114 			    nc != np->preferred_channel)
1115 				netdev_info(ndp->ndev.dev,
1116 					    "NCSI: Tx failed over to channel %u\n",
1117 					    nc->id);
1118 			nca.type = NCSI_PKT_CMD_ECNT;
1119 			nd->state = ncsi_dev_state_config_ec;
1120 		} else if (nd->state == ncsi_dev_state_config_ec) {
1121 			/* Enable AEN if it's supported */
1122 			nca.type = NCSI_PKT_CMD_EC;
1123 			nd->state = ncsi_dev_state_config_ae;
1124 			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
1125 				nd->state = ncsi_dev_state_config_gls;
1126 		} else if (nd->state == ncsi_dev_state_config_ae) {
1127 			nca.type = NCSI_PKT_CMD_AE;
1128 			nca.bytes[0] = 0;
1129 			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
1130 			nd->state = ncsi_dev_state_config_gls;
1131 		} else if (nd->state == ncsi_dev_state_config_gls) {
1132 			nca.type = NCSI_PKT_CMD_GLS;
1133 			nd->state = ncsi_dev_state_config_done;
1134 		}
1135 
1136 		ret = ncsi_xmit_cmd(&nca);
1137 		if (ret) {
1138 			netdev_err(ndp->ndev.dev,
1139 				   "NCSI: Failed to transmit CMD %x\n",
1140 				   nca.type);
1141 			goto error;
1142 		}
1143 		break;
1144 	case ncsi_dev_state_config_done:
1145 		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
1146 			   nc->id);
1147 		spin_lock_irqsave(&nc->lock, flags);
1148 		nc->state = NCSI_CHANNEL_ACTIVE;
1149 
1150 		if (ndp->flags & NCSI_DEV_RESET) {
1151 			/* A reset event happened during config, start it now */
1152 			nc->reconfigure_needed = false;
1153 			spin_unlock_irqrestore(&nc->lock, flags);
1154 			ncsi_reset_dev(nd);
1155 			break;
1156 		}
1157 
1158 		if (nc->reconfigure_needed) {
1159 			/* This channel's configuration has been updated
1160 			 * part-way during the config state - start the
1161 			 * channel configuration over
1162 			 */
1163 			nc->reconfigure_needed = false;
1164 			nc->state = NCSI_CHANNEL_INACTIVE;
1165 			spin_unlock_irqrestore(&nc->lock, flags);
1166 
1167 			spin_lock_irqsave(&ndp->lock, flags);
1168 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1169 			spin_unlock_irqrestore(&ndp->lock, flags);
1170 
1171 			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
1172 			ncsi_process_next_channel(ndp);
1173 			break;
1174 		}
1175 
1176 		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
1177 			hot_nc = nc;
1178 		} else {
1179 			hot_nc = NULL;
1180 			netdev_dbg(ndp->ndev.dev,
1181 				   "NCSI: channel %u link down after config\n",
1182 				   nc->id);
1183 		}
1184 		spin_unlock_irqrestore(&nc->lock, flags);
1185 
1186 		/* Update the hot channel */
1187 		spin_lock_irqsave(&ndp->lock, flags);
1188 		ndp->hot_channel = hot_nc;
1189 		spin_unlock_irqrestore(&ndp->lock, flags);
1190 
1191 		ncsi_start_channel_monitor(nc);
1192 		ncsi_process_next_channel(ndp);
1193 		break;
1194 	default:
1195 		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
1196 			     nd->state);
1197 	}
1198 
1199 	return;
1200 
1201 error:
1202 	ncsi_report_link(ndp, true);
1203 }
1204 
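/* Scan the whitelisted packages and channels for inactive channels to
 * configure, preferring the previous "hot" channel and any channel with
 * link up. If no channel has link, fall back to the first one found.
 */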
1205 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
1206 {
1207 	struct ncsi_channel *nc, *found, *hot_nc;
1208 	struct ncsi_channel_mode *ncm;
1209 	unsigned long flags, cflags;
1210 	struct ncsi_package *np;
1211 	bool with_link;
1212 
1213 	spin_lock_irqsave(&ndp->lock, flags);
1214 	hot_nc = ndp->hot_channel;
1215 	spin_unlock_irqrestore(&ndp->lock, flags);
1216 
1217 	/* By default the search stops once an inactive channel with link up
1218 	 * is found, unless a preferred channel is set.
1219 	 * If multi_package or multi_channel are configured, all channels in
1220 	 * the whitelist are added to the channel queue.
1221 	 */
1222 	found = NULL;
1223 	with_link = false;
1224 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1225 		if (!(ndp->package_whitelist & (0x1 << np->id)))
1226 			continue;
1227 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1228 			if (!(np->channel_whitelist & (0x1 << nc->id)))
1229 				continue;
1230 
1231 			spin_lock_irqsave(&nc->lock, cflags);
1232 
1233 			if (!list_empty(&nc->link) ||
1234 			    nc->state != NCSI_CHANNEL_INACTIVE) {
1235 				spin_unlock_irqrestore(&nc->lock, cflags);
1236 				continue;
1237 			}
1238 
1239 			if (!found)
1240 				found = nc;
1241 
1242 			if (nc == hot_nc)
1243 				found = nc;
1244 
1245 			ncm = &nc->modes[NCSI_MODE_LINK];
1246 			if (ncm->data[2] & 0x1) {
1247 				found = nc;
1248 				with_link = true;
1249 			}
1250 
1251 			/* If multi_channel is enabled configure all valid
1252 			 * channels whether or not they currently have link
1253 			 * so they will have AENs enabled.
1254 			 */
1255 			if (with_link || np->multi_channel) {
1256 				spin_lock_irqsave(&ndp->lock, flags);
1257 				list_add_tail_rcu(&nc->link,
1258 						  &ndp->channel_queue);
1259 				spin_unlock_irqrestore(&ndp->lock, flags);
1260 
1261 				netdev_dbg(ndp->ndev.dev,
1262 					   "NCSI: Channel %u added to queue (link %s)\n",
1263 					   nc->id,
1264 					   ncm->data[2] & 0x1 ? "up" : "down");
1265 			}
1266 
1267 			spin_unlock_irqrestore(&nc->lock, cflags);
1268 
1269 			if (with_link && !np->multi_channel)
1270 				break;
1271 		}
1272 		if (with_link && !ndp->multi_package)
1273 			break;
1274 	}
1275 
1276 	if (list_empty(&ndp->channel_queue) && found) {
1277 		netdev_info(ndp->ndev.dev,
1278 			    "NCSI: No channel with link found, configuring channel %u\n",
1279 			    found->id);
1280 		spin_lock_irqsave(&ndp->lock, flags);
1281 		list_add_tail_rcu(&found->link, &ndp->channel_queue);
1282 		spin_unlock_irqrestore(&ndp->lock, flags);
1283 	} else if (!found) {
1284 		netdev_warn(ndp->ndev.dev,
1285 			    "NCSI: No channel found to configure!\n");
1286 		ncsi_report_link(ndp, true);
1287 		return -ENODEV;
1288 	}
1289 
1290 	return ncsi_process_next_channel(ndp);
1291 }
1292 
1293 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1294 {
1295 	struct ncsi_package *np;
1296 	struct ncsi_channel *nc;
1297 	unsigned int cap;
1298 	bool has_channel = false;
1299 
1300 	/* Hardware arbitration is disabled if any channel doesn't
1301 	 * explicitly support it.
1302 	 */
1303 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1304 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1305 			has_channel = true;
1306 
1307 			cap = nc->caps[NCSI_CAP_GENERIC].cap;
1308 			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1309 			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1310 			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
1311 				ndp->flags &= ~NCSI_DEV_HWA;
1312 				return false;
1313 			}
1314 		}
1315 	}
1316 
1317 	if (has_channel) {
1318 		ndp->flags |= NCSI_DEV_HWA;
1319 		return true;
1320 	}
1321 
1322 	ndp->flags &= ~NCSI_DEV_HWA;
1323 	return false;
1324 }
1325 
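/* Probe state machine: deselect all packages, then select each package
 * in turn and enumerate its channels (clear initial state, get version,
 * capabilities and link status) before deselecting it again. Once all
 * eight possible packages have been probed, pick a channel to configure.
 */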
1326 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1327 {
1328 	struct ncsi_dev *nd = &ndp->ndev;
1329 	struct ncsi_package *np;
1330 	struct ncsi_channel *nc;
1331 	struct ncsi_cmd_arg nca;
1332 	unsigned char index;
1333 	int ret;
1334 
1335 	nca.ndp = ndp;
1336 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1337 	switch (nd->state) {
1338 	case ncsi_dev_state_probe:
1339 		nd->state = ncsi_dev_state_probe_deselect;
1340 		fallthrough;
1341 	case ncsi_dev_state_probe_deselect:
1342 		ndp->pending_req_num = 8;
1343 
1344 		/* Deselect all possible packages */
1345 		nca.type = NCSI_PKT_CMD_DP;
1346 		nca.channel = NCSI_RESERVED_CHANNEL;
1347 		for (index = 0; index < 8; index++) {
1348 			nca.package = index;
1349 			ret = ncsi_xmit_cmd(&nca);
1350 			if (ret)
1351 				goto error;
1352 		}
1353 
1354 		nd->state = ncsi_dev_state_probe_package;
1355 		break;
1356 	case ncsi_dev_state_probe_package:
1357 		ndp->pending_req_num = 1;
1358 
1359 		nca.type = NCSI_PKT_CMD_SP;
1360 		nca.bytes[0] = 1;
1361 		nca.package = ndp->package_probe_id;
1362 		nca.channel = NCSI_RESERVED_CHANNEL;
1363 		ret = ncsi_xmit_cmd(&nca);
1364 		if (ret)
1365 			goto error;
1366 		nd->state = ncsi_dev_state_probe_channel;
1367 		break;
1368 	case ncsi_dev_state_probe_channel:
1369 		ndp->active_package = ncsi_find_package(ndp,
1370 							ndp->package_probe_id);
1371 		if (!ndp->active_package) {
1372 			/* No response */
1373 			nd->state = ncsi_dev_state_probe_dp;
1374 			schedule_work(&ndp->work);
1375 			break;
1376 		}
1377 		nd->state = ncsi_dev_state_probe_cis;
1378 		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) &&
1379 		    ndp->mlx_multi_host)
1380 			nd->state = ncsi_dev_state_probe_mlx_gma;
1381 
1382 		schedule_work(&ndp->work);
1383 		break;
1384 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
1385 	case ncsi_dev_state_probe_mlx_gma:
1386 		ndp->pending_req_num = 1;
1387 
1388 		nca.type = NCSI_PKT_CMD_OEM;
1389 		nca.package = ndp->active_package->id;
1390 		nca.channel = 0;
1391 		ret = ncsi_oem_gma_handler_mlx(&nca);
1392 		if (ret)
1393 			goto error;
1394 
1395 		nd->state = ncsi_dev_state_probe_mlx_smaf;
1396 		break;
1397 	case ncsi_dev_state_probe_mlx_smaf:
1398 		ndp->pending_req_num = 1;
1399 
1400 		nca.type = NCSI_PKT_CMD_OEM;
1401 		nca.package = ndp->active_package->id;
1402 		nca.channel = 0;
1403 		ret = ncsi_oem_smaf_mlx(&nca);
1404 		if (ret)
1405 			goto error;
1406 
1407 		nd->state = ncsi_dev_state_probe_cis;
1408 		break;
1409 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
1410 	case ncsi_dev_state_probe_cis:
1411 		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1412 
1413 		/* Clear initial state */
1414 		nca.type = NCSI_PKT_CMD_CIS;
1415 		nca.package = ndp->active_package->id;
1416 		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1417 			nca.channel = index;
1418 			ret = ncsi_xmit_cmd(&nca);
1419 			if (ret)
1420 				goto error;
1421 		}
1422 
1423 		nd->state = ncsi_dev_state_probe_gvi;
1424 		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
1425 			nd->state = ncsi_dev_state_probe_keep_phy;
1426 		break;
1427 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)
1428 	case ncsi_dev_state_probe_keep_phy:
1429 		ndp->pending_req_num = 1;
1430 
1431 		nca.type = NCSI_PKT_CMD_OEM;
1432 		nca.package = ndp->active_package->id;
1433 		nca.channel = 0;
1434 		ret = ncsi_oem_keep_phy_intel(&nca);
1435 		if (ret)
1436 			goto error;
1437 
1438 		nd->state = ncsi_dev_state_probe_gvi;
1439 		break;
1440 #endif /* CONFIG_NCSI_OEM_CMD_KEEP_PHY */
1441 	case ncsi_dev_state_probe_gvi:
1442 	case ncsi_dev_state_probe_gc:
1443 	case ncsi_dev_state_probe_gls:
1444 		np = ndp->active_package;
1445 		ndp->pending_req_num = np->channel_num;
1446 
1447 		/* Retrieve version, capability or link status */
1448 		if (nd->state == ncsi_dev_state_probe_gvi)
1449 			nca.type = NCSI_PKT_CMD_GVI;
1450 		else if (nd->state == ncsi_dev_state_probe_gc)
1451 			nca.type = NCSI_PKT_CMD_GC;
1452 		else
1453 			nca.type = NCSI_PKT_CMD_GLS;
1454 
1455 		nca.package = np->id;
1456 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1457 			nca.channel = nc->id;
1458 			ret = ncsi_xmit_cmd(&nca);
1459 			if (ret)
1460 				goto error;
1461 		}
1462 
1463 		if (nd->state == ncsi_dev_state_probe_gvi)
1464 			nd->state = ncsi_dev_state_probe_gc;
1465 		else if (nd->state == ncsi_dev_state_probe_gc)
1466 			nd->state = ncsi_dev_state_probe_gls;
1467 		else
1468 			nd->state = ncsi_dev_state_probe_dp;
1469 		break;
1470 	case ncsi_dev_state_probe_dp:
1471 		ndp->pending_req_num = 1;
1472 
1473 		/* Deselect the current package */
1474 		nca.type = NCSI_PKT_CMD_DP;
1475 		nca.package = ndp->package_probe_id;
1476 		nca.channel = NCSI_RESERVED_CHANNEL;
1477 		ret = ncsi_xmit_cmd(&nca);
1478 		if (ret)
1479 			goto error;
1480 
1481 		/* Probe next package */
1482 		ndp->package_probe_id++;
1483 		if (ndp->package_probe_id >= 8) {
1484 			/* Probe finished */
1485 			ndp->flags |= NCSI_DEV_PROBED;
1486 			break;
1487 		}
1488 		nd->state = ncsi_dev_state_probe_package;
1489 		ndp->active_package = NULL;
1490 		break;
1491 	default:
1492 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in enumeration\n",
1493 			    nd->state);
1494 	}
1495 
1496 	if (ndp->flags & NCSI_DEV_PROBED) {
1497 		/* Check if all packages have HWA support */
1498 		ncsi_check_hwa(ndp);
1499 		ncsi_choose_active_channel(ndp);
1500 	}
1501 
1502 	return;
1503 error:
1504 	netdev_err(ndp->ndev.dev,
1505 		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
1506 		   nca.type);
1507 	ncsi_report_link(ndp, true);
1508 }
1509 
1510 static void ncsi_dev_work(struct work_struct *work)
1511 {
1512 	struct ncsi_dev_priv *ndp = container_of(work,
1513 			struct ncsi_dev_priv, work);
1514 	struct ncsi_dev *nd = &ndp->ndev;
1515 
1516 	switch (nd->state & ncsi_dev_state_major) {
1517 	case ncsi_dev_state_probe:
1518 		ncsi_probe_channel(ndp);
1519 		break;
1520 	case ncsi_dev_state_suspend:
1521 		ncsi_suspend_channel(ndp);
1522 		break;
1523 	case ncsi_dev_state_config:
1524 		ncsi_configure_channel(ndp);
1525 		break;
1526 	default:
1527 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1528 			    nd->state);
1529 	}
1530 }
1531 
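/* Pull the next channel off the channel queue and run the matching state
 * machine: configure it if it was inactive, suspend it if it was active.
 * With an empty queue, either restart channel selection (on a reshuffle)
 * or report the final link state.
 */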
1532 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1533 {
1534 	struct ncsi_channel *nc;
1535 	int old_state;
1536 	unsigned long flags;
1537 
1538 	spin_lock_irqsave(&ndp->lock, flags);
1539 	nc = list_first_or_null_rcu(&ndp->channel_queue,
1540 				    struct ncsi_channel, link);
1541 	if (!nc) {
1542 		spin_unlock_irqrestore(&ndp->lock, flags);
1543 		goto out;
1544 	}
1545 
1546 	list_del_init(&nc->link);
1547 	spin_unlock_irqrestore(&ndp->lock, flags);
1548 
1549 	spin_lock_irqsave(&nc->lock, flags);
1550 	old_state = nc->state;
1551 	nc->state = NCSI_CHANNEL_INVISIBLE;
1552 	spin_unlock_irqrestore(&nc->lock, flags);
1553 
1554 	ndp->active_channel = nc;
1555 	ndp->active_package = nc->package;
1556 
1557 	switch (old_state) {
1558 	case NCSI_CHANNEL_INACTIVE:
1559 		ndp->ndev.state = ncsi_dev_state_config;
1560 		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
1561 			   nc->id);
1562 		ncsi_configure_channel(ndp);
1563 		break;
1564 	case NCSI_CHANNEL_ACTIVE:
1565 		ndp->ndev.state = ncsi_dev_state_suspend;
1566 		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1567 			   nc->id);
1568 		ncsi_suspend_channel(ndp);
1569 		break;
1570 	default:
1571 		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1572 			   old_state, nc->package->id, nc->id);
1573 		ncsi_report_link(ndp, false);
1574 		return -EINVAL;
1575 	}
1576 
1577 	return 0;
1578 
1579 out:
1580 	ndp->active_channel = NULL;
1581 	ndp->active_package = NULL;
1582 	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1583 		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1584 		return ncsi_choose_active_channel(ndp);
1585 	}
1586 
1587 	ncsi_report_link(ndp, false);
1588 	return -ENODEV;
1589 }
1590 
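/* Requeue all channels for reconfiguration. Active channels are made
 * inactive and added to the channel queue; channels that are busy being
 * configured are only marked as needing reconfiguration. Returns the
 * number of channels actually queued.
 */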
1591 static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1592 {
1593 	struct ncsi_dev *nd = &ndp->ndev;
1594 	struct ncsi_channel *nc;
1595 	struct ncsi_package *np;
1596 	unsigned long flags;
1597 	unsigned int n = 0;
1598 
1599 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1600 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1601 			spin_lock_irqsave(&nc->lock, flags);
1602 
1603 			/* Channels may be busy, so mark them dirty instead of
1604 			 * kicking them if:
1605 			 * a) not ACTIVE (configured)
1606 			 * b) in the channel_queue (to be configured)
1607 			 * c) its ndev is in the config state
1608 			 */
1609 			if (nc->state != NCSI_CHANNEL_ACTIVE) {
1610 				if ((ndp->ndev.state & 0xff00) ==
1611 						ncsi_dev_state_config ||
1612 						!list_empty(&nc->link)) {
1613 					netdev_dbg(nd->dev,
1614 						   "NCSI: channel %p marked dirty\n",
1615 						   nc);
1616 					nc->reconfigure_needed = true;
1617 				}
1618 				spin_unlock_irqrestore(&nc->lock, flags);
1619 				continue;
1620 			}
1621 
1622 			spin_unlock_irqrestore(&nc->lock, flags);
1623 
1624 			ncsi_stop_channel_monitor(nc);
1625 			spin_lock_irqsave(&nc->lock, flags);
1626 			nc->state = NCSI_CHANNEL_INACTIVE;
1627 			spin_unlock_irqrestore(&nc->lock, flags);
1628 
1629 			spin_lock_irqsave(&ndp->lock, flags);
1630 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1631 			spin_unlock_irqrestore(&ndp->lock, flags);
1632 
1633 			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
1634 			n++;
1635 		}
1636 	}
1637 
1638 	return n;
1639 }
1640 
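/* Record a VLAN ID added on the interface and kick the channels so the
 * updated filter gets programmed into the NIC.
 */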
1641 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1642 {
1643 	struct ncsi_dev_priv *ndp;
1644 	unsigned int n_vids = 0;
1645 	struct vlan_vid *vlan;
1646 	struct ncsi_dev *nd;
1647 	bool found = false;
1648 
1649 	if (vid == 0)
1650 		return 0;
1651 
1652 	nd = ncsi_find_dev(dev);
1653 	if (!nd) {
1654 		netdev_warn(dev, "NCSI: No net_device?\n");
1655 		return 0;
1656 	}
1657 
1658 	ndp = TO_NCSI_DEV_PRIV(nd);
1659 
1660 	/* Add the VLAN id to our internal list */
1661 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1662 		n_vids++;
1663 		if (vlan->vid == vid) {
1664 			netdev_dbg(dev, "NCSI: vid %u already registered\n",
1665 				   vid);
1666 			return 0;
1667 		}
1668 	}
1669 	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1670 		netdev_warn(dev,
1671 			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
1672 			    vid, NCSI_MAX_VLAN_VIDS);
1673 		return -ENOSPC;
1674 	}
1675 
1676 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1677 	if (!vlan)
1678 		return -ENOMEM;
1679 
1680 	vlan->proto = proto;
1681 	vlan->vid = vid;
1682 	list_add_rcu(&vlan->list, &ndp->vlan_vids);
1683 
1684 	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
1685 
1686 	found = ncsi_kick_channels(ndp) != 0;
1687 
1688 	return found ? ncsi_process_next_channel(ndp) : 0;
1689 }
1690 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1691 
1692 int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1693 {
1694 	struct vlan_vid *vlan, *tmp;
1695 	struct ncsi_dev_priv *ndp;
1696 	struct ncsi_dev *nd;
1697 	bool found = false;
1698 
1699 	if (vid == 0)
1700 		return 0;
1701 
1702 	nd = ncsi_find_dev(dev);
1703 	if (!nd) {
1704 		netdev_warn(dev, "NCSI: no net_device?\n");
1705 		return 0;
1706 	}
1707 
1708 	ndp = TO_NCSI_DEV_PRIV(nd);
1709 
1710 	/* Remove the VLAN id from our internal list */
1711 	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1712 		if (vlan->vid == vid) {
1713 			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
1714 			list_del_rcu(&vlan->list);
1715 			found = true;
1716 			kfree(vlan);
1717 		}
1718 
1719 	if (!found) {
1720 		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
1721 		return -EINVAL;
1722 	}
1723 
1724 	found = ncsi_kick_channels(ndp) != 0;
1725 
1726 	return found ? ncsi_process_next_channel(ndp) : 0;
1727 }
1728 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
1729 
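/* Register @dev as an NCSI-managed interface: allocate the private NCSI
 * state, install the NCSI packet receive handler and pick up platform
 * properties such as "mlx,multi-host". Returns the existing descriptor
 * if the device was already registered.
 */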
1730 struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1731 				   void (*handler)(struct ncsi_dev *ndev))
1732 {
1733 	struct ncsi_dev_priv *ndp;
1734 	struct ncsi_dev *nd;
1735 	struct platform_device *pdev;
1736 	struct device_node *np;
1737 	unsigned long flags;
1738 	int i;
1739 
1740 	/* Check if the device has been registered or not */
1741 	nd = ncsi_find_dev(dev);
1742 	if (nd)
1743 		return nd;
1744 
1745 	/* Create NCSI device */
1746 	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1747 	if (!ndp)
1748 		return NULL;
1749 
1750 	nd = &ndp->ndev;
1751 	nd->state = ncsi_dev_state_registered;
1752 	nd->dev = dev;
1753 	nd->handler = handler;
1754 	ndp->pending_req_num = 0;
1755 	INIT_LIST_HEAD(&ndp->channel_queue);
1756 	INIT_LIST_HEAD(&ndp->vlan_vids);
1757 	INIT_WORK(&ndp->work, ncsi_dev_work);
1758 	ndp->package_whitelist = UINT_MAX;
1759 
1760 	/* Initialize private NCSI device */
1761 	spin_lock_init(&ndp->lock);
1762 	INIT_LIST_HEAD(&ndp->packages);
1763 	ndp->request_id = NCSI_REQ_START_IDX;
1764 	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1765 		ndp->requests[i].id = i;
1766 		ndp->requests[i].ndp = ndp;
1767 		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
1768 	}
1769 
1770 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1771 	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1772 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1773 
1774 	/* Register NCSI packet Rx handler */
1775 	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1776 	ndp->ptype.func = ncsi_rcv_rsp;
1777 	ndp->ptype.dev = dev;
1778 	dev_add_pack(&ndp->ptype);
1779 
1780 	pdev = to_platform_device(dev->dev.parent);
1781 	if (pdev) {
1782 		np = pdev->dev.of_node;
1783 		if (np && of_get_property(np, "mlx,multi-host", NULL))
1784 			ndp->mlx_multi_host = true;
1785 	}
1786 
1787 	return nd;
1788 }
1789 EXPORT_SYMBOL_GPL(ncsi_register_dev);
1790 
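/* Start NCSI on a registered device: run the probe state machine on the
 * first start, otherwise reset and reconfigure the existing topology.
 */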
1791 int ncsi_start_dev(struct ncsi_dev *nd)
1792 {
1793 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1794 
1795 	if (nd->state != ncsi_dev_state_registered &&
1796 	    nd->state != ncsi_dev_state_functional)
1797 		return -ENOTTY;
1798 
1799 	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1800 		ndp->package_probe_id = 0;
1801 		nd->state = ncsi_dev_state_probe;
1802 		schedule_work(&ndp->work);
1803 		return 0;
1804 	}
1805 
1806 	return ncsi_reset_dev(nd);
1807 }
1808 EXPORT_SYMBOL_GPL(ncsi_start_dev);
1809 
1810 void ncsi_stop_dev(struct ncsi_dev *nd)
1811 {
1812 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1813 	struct ncsi_package *np;
1814 	struct ncsi_channel *nc;
1815 	bool chained;
1816 	int old_state;
1817 	unsigned long flags;
1818 
1819 	/* Stop the channel monitor on any active channels. Don't reset the
1820 	 * channel state so we know which were active when ncsi_start_dev()
1821 	 * is next called.
1822 	 */
1823 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1824 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1825 			ncsi_stop_channel_monitor(nc);
1826 
1827 			spin_lock_irqsave(&nc->lock, flags);
1828 			chained = !list_empty(&nc->link);
1829 			old_state = nc->state;
1830 			spin_unlock_irqrestore(&nc->lock, flags);
1831 
1832 			WARN_ON_ONCE(chained ||
1833 				     old_state == NCSI_CHANNEL_INVISIBLE);
1834 		}
1835 	}
1836 
1837 	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
1838 	ncsi_report_link(ndp, true);
1839 }
1840 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1841 
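/* Reset the NCSI device: suspend whichever channel is currently active
 * (deferring if a suspend or configure is already in flight) and then
 * choose a channel to configure again.
 */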
1842 int ncsi_reset_dev(struct ncsi_dev *nd)
1843 {
1844 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1845 	struct ncsi_channel *nc, *active, *tmp;
1846 	struct ncsi_package *np;
1847 	unsigned long flags;
1848 
1849 	spin_lock_irqsave(&ndp->lock, flags);
1850 
1851 	if (!(ndp->flags & NCSI_DEV_RESET)) {
1852 		/* Reset hasn't been requested yet - check states */
1853 		switch (nd->state & ncsi_dev_state_major) {
1854 		case ncsi_dev_state_registered:
1855 		case ncsi_dev_state_probe:
1856 			/* Not even probed yet - do nothing */
1857 			spin_unlock_irqrestore(&ndp->lock, flags);
1858 			return 0;
1859 		case ncsi_dev_state_suspend:
1860 		case ncsi_dev_state_config:
1861 			/* Wait for the channel to finish its suspend/config
1862 			 * operation; once it finishes it will check for
1863 			 * NCSI_DEV_RESET and reset the state.
1864 			 */
1865 			ndp->flags |= NCSI_DEV_RESET;
1866 			spin_unlock_irqrestore(&ndp->lock, flags);
1867 			return 0;
1868 		}
1869 	} else {
1870 		switch (nd->state) {
1871 		case ncsi_dev_state_suspend_done:
1872 		case ncsi_dev_state_config_done:
1873 		case ncsi_dev_state_functional:
1874 			/* Ok */
1875 			break;
1876 		default:
1877 			/* A reset operation is already in progress */
1878 			spin_unlock_irqrestore(&ndp->lock, flags);
1879 			return 0;
1880 		}
1881 	}
1882 
1883 	if (!list_empty(&ndp->channel_queue)) {
1884 		/* Clear any channel queue we may have interrupted */
1885 		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
1886 			list_del_init(&nc->link);
1887 	}
1888 	spin_unlock_irqrestore(&ndp->lock, flags);
1889 
1890 	active = NULL;
1891 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1892 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1893 			spin_lock_irqsave(&nc->lock, flags);
1894 
1895 			if (nc->state == NCSI_CHANNEL_ACTIVE) {
1896 				active = nc;
1897 				nc->state = NCSI_CHANNEL_INVISIBLE;
1898 				spin_unlock_irqrestore(&nc->lock, flags);
1899 				ncsi_stop_channel_monitor(nc);
1900 				break;
1901 			}
1902 
1903 			spin_unlock_irqrestore(&nc->lock, flags);
1904 		}
1905 		if (active)
1906 			break;
1907 	}
1908 
1909 	if (!active) {
1910 		/* Done */
1911 		spin_lock_irqsave(&ndp->lock, flags);
1912 		ndp->flags &= ~NCSI_DEV_RESET;
1913 		spin_unlock_irqrestore(&ndp->lock, flags);
1914 		return ncsi_choose_active_channel(ndp);
1915 	}
1916 
1917 	spin_lock_irqsave(&ndp->lock, flags);
1918 	ndp->flags |= NCSI_DEV_RESET;
1919 	ndp->active_channel = active;
1920 	ndp->active_package = active->package;
1921 	spin_unlock_irqrestore(&ndp->lock, flags);
1922 
1923 	nd->state = ncsi_dev_state_suspend;
1924 	schedule_work(&ndp->work);
1925 	return 0;
1926 }
1927 
1928 void ncsi_unregister_dev(struct ncsi_dev *nd)
1929 {
1930 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1931 	struct ncsi_package *np, *tmp;
1932 	unsigned long flags;
1933 
1934 	dev_remove_pack(&ndp->ptype);
1935 
1936 	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1937 		ncsi_remove_package(np);
1938 
1939 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1940 	list_del_rcu(&ndp->node);
1941 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1942 
1943 	kfree(ndp);
1944 }
1945 EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
1946