xref: /openbmc/linux/net/ncsi/ncsi-manage.c (revision 2c6467d2)
1 /*
2  * Copyright Gavin Shan, IBM Corporation 2016.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 
16 #include <net/ncsi.h>
17 #include <net/net_namespace.h>
18 #include <net/sock.h>
19 #include <net/addrconf.h>
20 #include <net/ipv6.h>
21 #include <net/if_inet6.h>
22 #include <net/genetlink.h>
23 
24 #include "internal.h"
25 #include "ncsi-pkt.h"
26 #include "ncsi-netlink.h"
27 
28 LIST_HEAD(ncsi_dev_list);
29 DEFINE_SPINLOCK(ncsi_dev_lock);
30 
31 bool ncsi_channel_has_link(struct ncsi_channel *channel)
32 {
33 	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
34 }
35 
36 bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
37 			  struct ncsi_channel *channel)
38 {
39 	struct ncsi_package *np;
40 	struct ncsi_channel *nc;
41 
42 	NCSI_FOR_EACH_PACKAGE(ndp, np)
43 		NCSI_FOR_EACH_CHANNEL(np, nc) {
44 			if (nc == channel)
45 				continue;
46 			if (nc->state == NCSI_CHANNEL_ACTIVE &&
47 			    ncsi_channel_has_link(nc))
48 				return false;
49 		}
50 
51 	return true;
52 }
53 
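/* Report the current link state to the upper layer through the handler
 * registered in ncsi_register_dev(). Unless @force_down is set, the link
 * is reported up when at least one active, unqueued channel has link.
 */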
54 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
55 {
56 	struct ncsi_dev *nd = &ndp->ndev;
57 	struct ncsi_package *np;
58 	struct ncsi_channel *nc;
59 	unsigned long flags;
60 
61 	nd->state = ncsi_dev_state_functional;
62 	if (force_down) {
63 		nd->link_up = 0;
64 		goto report;
65 	}
66 
67 	nd->link_up = 0;
68 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
69 		NCSI_FOR_EACH_CHANNEL(np, nc) {
70 			spin_lock_irqsave(&nc->lock, flags);
71 
72 			if (!list_empty(&nc->link) ||
73 			    nc->state != NCSI_CHANNEL_ACTIVE) {
74 				spin_unlock_irqrestore(&nc->lock, flags);
75 				continue;
76 			}
77 
78 			if (ncsi_channel_has_link(nc)) {
79 				spin_unlock_irqrestore(&nc->lock, flags);
80 				nd->link_up = 1;
81 				goto report;
82 			}
83 
84 			spin_unlock_irqrestore(&nc->lock, flags);
85 		}
86 	}
87 
88 report:
89 	nd->handler(nd);
90 }
91 
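/* Per-channel monitor timer, rearmed every second. In the START/RETRY
 * states it sends a Get Link Status (GLS) command on the channel; once
 * the monitor state advances past NCSI_CHANNEL_MONITOR_WAIT_MAX without
 * being reset, the channel is considered dead: the link is reported down
 * and the channel is requeued for reconfiguration.
 */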
92 static void ncsi_channel_monitor(struct timer_list *t)
93 {
94 	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
95 	struct ncsi_package *np = nc->package;
96 	struct ncsi_dev_priv *ndp = np->ndp;
97 	struct ncsi_channel_mode *ncm;
98 	struct ncsi_cmd_arg nca;
99 	bool enabled, chained;
100 	unsigned int monitor_state;
101 	unsigned long flags;
102 	int state, ret;
103 
104 	spin_lock_irqsave(&nc->lock, flags);
105 	state = nc->state;
106 	chained = !list_empty(&nc->link);
107 	enabled = nc->monitor.enabled;
108 	monitor_state = nc->monitor.state;
109 	spin_unlock_irqrestore(&nc->lock, flags);
110 
111 	if (!enabled || chained) {
112 		ncsi_stop_channel_monitor(nc);
113 		return;
114 	}
115 	if (state != NCSI_CHANNEL_INACTIVE &&
116 	    state != NCSI_CHANNEL_ACTIVE) {
117 		ncsi_stop_channel_monitor(nc);
118 		return;
119 	}
120 
121 	switch (monitor_state) {
122 	case NCSI_CHANNEL_MONITOR_START:
123 	case NCSI_CHANNEL_MONITOR_RETRY:
124 		nca.ndp = ndp;
125 		nca.package = np->id;
126 		nca.channel = nc->id;
127 		nca.type = NCSI_PKT_CMD_GLS;
128 		nca.req_flags = 0;
129 		ret = ncsi_xmit_cmd(&nca);
130 		if (ret)
131 			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
132 				   ret);
133 		break;
134 	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
135 		break;
136 	default:
137 		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
138 			   nc->id);
139 		ncsi_report_link(ndp, true);
140 		ndp->flags |= NCSI_DEV_RESHUFFLE;
141 
142 		ncsi_stop_channel_monitor(nc);
143 
144 		ncm = &nc->modes[NCSI_MODE_LINK];
145 		spin_lock_irqsave(&nc->lock, flags);
146 		nc->state = NCSI_CHANNEL_INVISIBLE;
147 		ncm->data[2] &= ~0x1;
148 		spin_unlock_irqrestore(&nc->lock, flags);
149 
150 		spin_lock_irqsave(&ndp->lock, flags);
151 		nc->state = NCSI_CHANNEL_ACTIVE;
152 		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
153 		spin_unlock_irqrestore(&ndp->lock, flags);
154 		ncsi_process_next_channel(ndp);
155 		return;
156 	}
157 
158 	spin_lock_irqsave(&nc->lock, flags);
159 	nc->monitor.state++;
160 	spin_unlock_irqrestore(&nc->lock, flags);
161 	mod_timer(&nc->monitor.timer, jiffies + HZ);
162 }
163 
164 void ncsi_start_channel_monitor(struct ncsi_channel *nc)
165 {
166 	unsigned long flags;
167 
168 	spin_lock_irqsave(&nc->lock, flags);
169 	WARN_ON_ONCE(nc->monitor.enabled);
170 	nc->monitor.enabled = true;
171 	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
172 	spin_unlock_irqrestore(&nc->lock, flags);
173 
174 	mod_timer(&nc->monitor.timer, jiffies + HZ);
175 }
176 
177 void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
178 {
179 	unsigned long flags;
180 
181 	spin_lock_irqsave(&nc->lock, flags);
182 	if (!nc->monitor.enabled) {
183 		spin_unlock_irqrestore(&nc->lock, flags);
184 		return;
185 	}
186 	nc->monitor.enabled = false;
187 	spin_unlock_irqrestore(&nc->lock, flags);
188 
189 	del_timer_sync(&nc->monitor.timer);
190 }
191 
192 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
193 				       unsigned char id)
194 {
195 	struct ncsi_channel *nc;
196 
197 	NCSI_FOR_EACH_CHANNEL(np, nc) {
198 		if (nc->id == id)
199 			return nc;
200 	}
201 
202 	return NULL;
203 }
204 
205 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
206 {
207 	struct ncsi_channel *nc, *tmp;
208 	int index;
209 	unsigned long flags;
210 
211 	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
212 	if (!nc)
213 		return NULL;
214 
215 	nc->id = id;
216 	nc->package = np;
217 	nc->state = NCSI_CHANNEL_INACTIVE;
218 	nc->monitor.enabled = false;
219 	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
220 	spin_lock_init(&nc->lock);
221 	INIT_LIST_HEAD(&nc->link);
222 	for (index = 0; index < NCSI_CAP_MAX; index++)
223 		nc->caps[index].index = index;
224 	for (index = 0; index < NCSI_MODE_MAX; index++)
225 		nc->modes[index].index = index;
226 
227 	spin_lock_irqsave(&np->lock, flags);
228 	tmp = ncsi_find_channel(np, id);
229 	if (tmp) {
230 		spin_unlock_irqrestore(&np->lock, flags);
231 		kfree(nc);
232 		return tmp;
233 	}
234 
235 	list_add_tail_rcu(&nc->node, &np->channels);
236 	np->channel_num++;
237 	spin_unlock_irqrestore(&np->lock, flags);
238 
239 	return nc;
240 }
241 
242 static void ncsi_remove_channel(struct ncsi_channel *nc)
243 {
244 	struct ncsi_package *np = nc->package;
245 	unsigned long flags;
246 
247 	spin_lock_irqsave(&nc->lock, flags);
248 
249 	/* Release filters */
250 	kfree(nc->mac_filter.addrs);
251 	kfree(nc->vlan_filter.vids);
252 
253 	nc->state = NCSI_CHANNEL_INACTIVE;
254 	spin_unlock_irqrestore(&nc->lock, flags);
255 	ncsi_stop_channel_monitor(nc);
256 
257 	/* Remove and free channel */
258 	spin_lock_irqsave(&np->lock, flags);
259 	list_del_rcu(&nc->node);
260 	np->channel_num--;
261 	spin_unlock_irqrestore(&np->lock, flags);
262 
263 	kfree(nc);
264 }
265 
266 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
267 				       unsigned char id)
268 {
269 	struct ncsi_package *np;
270 
271 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
272 		if (np->id == id)
273 			return np;
274 	}
275 
276 	return NULL;
277 }
278 
279 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
280 				      unsigned char id)
281 {
282 	struct ncsi_package *np, *tmp;
283 	unsigned long flags;
284 
285 	np = kzalloc(sizeof(*np), GFP_ATOMIC);
286 	if (!np)
287 		return NULL;
288 
289 	np->id = id;
290 	np->ndp = ndp;
291 	spin_lock_init(&np->lock);
292 	INIT_LIST_HEAD(&np->channels);
293 	np->channel_whitelist = UINT_MAX;
294 
295 	spin_lock_irqsave(&ndp->lock, flags);
296 	tmp = ncsi_find_package(ndp, id);
297 	if (tmp) {
298 		spin_unlock_irqrestore(&ndp->lock, flags);
299 		kfree(np);
300 		return tmp;
301 	}
302 
303 	list_add_tail_rcu(&np->node, &ndp->packages);
304 	ndp->package_num++;
305 	spin_unlock_irqrestore(&ndp->lock, flags);
306 
307 	return np;
308 }
309 
310 void ncsi_remove_package(struct ncsi_package *np)
311 {
312 	struct ncsi_dev_priv *ndp = np->ndp;
313 	struct ncsi_channel *nc, *tmp;
314 	unsigned long flags;
315 
316 	/* Release all child channels */
317 	list_for_each_entry_safe(nc, tmp, &np->channels, node)
318 		ncsi_remove_channel(nc);
319 
320 	/* Remove and free package */
321 	spin_lock_irqsave(&ndp->lock, flags);
322 	list_del_rcu(&np->node);
323 	ndp->package_num--;
324 	spin_unlock_irqrestore(&ndp->lock, flags);
325 
326 	kfree(np);
327 }
328 
329 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
330 				   unsigned char id,
331 				   struct ncsi_package **np,
332 				   struct ncsi_channel **nc)
333 {
334 	struct ncsi_package *p;
335 	struct ncsi_channel *c;
336 
337 	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
338 	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
339 
340 	if (np)
341 		*np = p;
342 	if (nc)
343 		*nc = c;
344 }
345 
346 /* For two consecutive NCSI commands, the packet IDs shouldn't
347  * be the same. Otherwise, a stale response might be matched to the
348  * wrong request. So the available IDs are allocated in round-robin fashion.
349  */
350 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
351 					unsigned int req_flags)
352 {
353 	struct ncsi_request *nr = NULL;
354 	int i, limit = ARRAY_SIZE(ndp->requests);
355 	unsigned long flags;
356 
357 	/* Check for an available request from the cursor up to the ceiling */
358 	spin_lock_irqsave(&ndp->lock, flags);
359 	for (i = ndp->request_id; i < limit; i++) {
360 		if (ndp->requests[i].used)
361 			continue;
362 
363 		nr = &ndp->requests[i];
364 		nr->used = true;
365 		nr->flags = req_flags;
366 		ndp->request_id = i + 1;
367 		goto found;
368 	}
369 
370 	/* Fall back to checking from the starting cursor */
371 	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
372 		if (ndp->requests[i].used)
373 			continue;
374 
375 		nr = &ndp->requests[i];
376 		nr->used = true;
377 		nr->flags = req_flags;
378 		ndp->request_id = i + 1;
379 		goto found;
380 	}
381 
382 found:
383 	spin_unlock_irqrestore(&ndp->lock, flags);
384 	return nr;
385 }
386 
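/* Return a request to the pool and drop its command/response skbs. For
 * event-driven requests, kick the state machine work item once the last
 * outstanding request has been released.
 */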
387 void ncsi_free_request(struct ncsi_request *nr)
388 {
389 	struct ncsi_dev_priv *ndp = nr->ndp;
390 	struct sk_buff *cmd, *rsp;
391 	unsigned long flags;
392 	bool driven;
393 
394 	if (nr->enabled) {
395 		nr->enabled = false;
396 		del_timer_sync(&nr->timer);
397 	}
398 
399 	spin_lock_irqsave(&ndp->lock, flags);
400 	cmd = nr->cmd;
401 	rsp = nr->rsp;
402 	nr->cmd = NULL;
403 	nr->rsp = NULL;
404 	nr->used = false;
405 	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
406 	spin_unlock_irqrestore(&ndp->lock, flags);
407 
408 	if (driven && cmd && --ndp->pending_req_num == 0)
409 		schedule_work(&ndp->work);
410 
411 	/* Release command and response */
412 	consume_skb(cmd);
413 	consume_skb(rsp);
414 }
415 
416 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
417 {
418 	struct ncsi_dev_priv *ndp;
419 
420 	NCSI_FOR_EACH_DEV(ndp) {
421 		if (ndp->ndev.dev == dev)
422 			return &ndp->ndev;
423 	}
424 
425 	return NULL;
426 }
427 
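/* Per-request timeout handler. If the command never received a response,
 * the request is released here; a netlink-driven request additionally has
 * a timeout notification sent back to userspace.
 */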
428 static void ncsi_request_timeout(struct timer_list *t)
429 {
430 	struct ncsi_request *nr = from_timer(nr, t, timer);
431 	struct ncsi_dev_priv *ndp = nr->ndp;
432 	struct ncsi_cmd_pkt *cmd;
433 	struct ncsi_package *np;
434 	struct ncsi_channel *nc;
435 	unsigned long flags;
436 
437 	/* If the request already has an associated response,
438 	 * let the response handler release it.
439 	 */
440 	spin_lock_irqsave(&ndp->lock, flags);
441 	nr->enabled = false;
442 	if (nr->rsp || !nr->cmd) {
443 		spin_unlock_irqrestore(&ndp->lock, flags);
444 		return;
445 	}
446 	spin_unlock_irqrestore(&ndp->lock, flags);
447 
448 	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
449 		if (nr->cmd) {
450 			/* Find the package */
451 			cmd = (struct ncsi_cmd_pkt *)
452 			      skb_network_header(nr->cmd);
453 			ncsi_find_package_and_channel(ndp,
454 						      cmd->cmd.common.channel,
455 						      &np, &nc);
456 			ncsi_send_netlink_timeout(nr, np, nc);
457 		}
458 	}
459 
460 	/* Release the request */
461 	ncsi_free_request(nr);
462 }
463 
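/* Suspend state machine for the active channel: Select Package (SP),
 * optionally Get Link Status on every channel of the package when a
 * reshuffle is pending, then Disable Channel Network Tx (DCNT), Disable
 * Channel (DC) and finally Deselect Package (DP), unless another channel
 * on the same package is still active.
 */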
464 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
465 {
466 	struct ncsi_dev *nd = &ndp->ndev;
467 	struct ncsi_package *np;
468 	struct ncsi_channel *nc, *tmp;
469 	struct ncsi_cmd_arg nca;
470 	unsigned long flags;
471 	int ret;
472 
473 	np = ndp->active_package;
474 	nc = ndp->active_channel;
475 	nca.ndp = ndp;
476 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
477 	switch (nd->state) {
478 	case ncsi_dev_state_suspend:
479 		nd->state = ncsi_dev_state_suspend_select;
480 		/* Fall through */
481 	case ncsi_dev_state_suspend_select:
482 		ndp->pending_req_num = 1;
483 
484 		nca.type = NCSI_PKT_CMD_SP;
485 		nca.package = np->id;
486 		nca.channel = NCSI_RESERVED_CHANNEL;
487 		if (ndp->flags & NCSI_DEV_HWA)
488 			nca.bytes[0] = 0;
489 		else
490 			nca.bytes[0] = 1;
491 
492 		/* Retrieve the last link states of the channels in the current
493 		 * package when the active channel needs to fail over to another
494 		 * one, since we may well select a different channel as the next
495 		 * active one. The channels' link states are the most important
496 		 * factor in that selection, so we need accurate link states.
497 		 * Unfortunately, the link states of inactive channels can't be
498 		 * updated by LSC AENs in time.
499 		 */
500 		if (ndp->flags & NCSI_DEV_RESHUFFLE)
501 			nd->state = ncsi_dev_state_suspend_gls;
502 		else
503 			nd->state = ncsi_dev_state_suspend_dcnt;
504 		ret = ncsi_xmit_cmd(&nca);
505 		if (ret)
506 			goto error;
507 
508 		break;
509 	case ncsi_dev_state_suspend_gls:
510 		ndp->pending_req_num = np->channel_num;
511 
512 		nca.type = NCSI_PKT_CMD_GLS;
513 		nca.package = np->id;
514 
515 		nd->state = ncsi_dev_state_suspend_dcnt;
516 		NCSI_FOR_EACH_CHANNEL(np, nc) {
517 			nca.channel = nc->id;
518 			ret = ncsi_xmit_cmd(&nca);
519 			if (ret)
520 				goto error;
521 		}
522 
523 		break;
524 	case ncsi_dev_state_suspend_dcnt:
525 		ndp->pending_req_num = 1;
526 
527 		nca.type = NCSI_PKT_CMD_DCNT;
528 		nca.package = np->id;
529 		nca.channel = nc->id;
530 
531 		nd->state = ncsi_dev_state_suspend_dc;
532 		ret = ncsi_xmit_cmd(&nca);
533 		if (ret)
534 			goto error;
535 
536 		break;
537 	case ncsi_dev_state_suspend_dc:
538 		ndp->pending_req_num = 1;
539 
540 		nca.type = NCSI_PKT_CMD_DC;
541 		nca.package = np->id;
542 		nca.channel = nc->id;
543 		nca.bytes[0] = 1;
544 
545 		nd->state = ncsi_dev_state_suspend_deselect;
546 		ret = ncsi_xmit_cmd(&nca);
547 		if (ret)
548 			goto error;
549 
550 		NCSI_FOR_EACH_CHANNEL(np, tmp) {
551 			/* If there is another channel active on this package,
552 			 * do not deselect the package.
553 			 */
554 			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
555 				nd->state = ncsi_dev_state_suspend_done;
556 				break;
557 			}
558 		}
559 		break;
560 	case ncsi_dev_state_suspend_deselect:
561 		ndp->pending_req_num = 1;
562 
563 		nca.type = NCSI_PKT_CMD_DP;
564 		nca.package = np->id;
565 		nca.channel = NCSI_RESERVED_CHANNEL;
566 
567 		nd->state = ncsi_dev_state_suspend_done;
568 		ret = ncsi_xmit_cmd(&nca);
569 		if (ret)
570 			goto error;
571 
572 		break;
573 	case ncsi_dev_state_suspend_done:
574 		spin_lock_irqsave(&nc->lock, flags);
575 		nc->state = NCSI_CHANNEL_INACTIVE;
576 		spin_unlock_irqrestore(&nc->lock, flags);
577 		if (ndp->flags & NCSI_DEV_RESET)
578 			ncsi_reset_dev(nd);
579 		else
580 			ncsi_process_next_channel(ndp);
581 		break;
582 	default:
583 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
584 			    nd->state);
585 	}
586 
587 	return;
588 error:
589 	nd->state = ncsi_dev_state_functional;
590 }
591 
592 /* Check the VLAN filter bitmap for a set filter, and construct a
593  * "Set VLAN Filter - Disable" packet if found.
594  */
595 static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
596 			 struct ncsi_cmd_arg *nca)
597 {
598 	struct ncsi_channel_vlan_filter *ncf;
599 	unsigned long flags;
600 	void *bitmap;
601 	int index;
602 	u16 vid;
603 
604 	ncf = &nc->vlan_filter;
605 	bitmap = &ncf->bitmap;
606 
607 	spin_lock_irqsave(&nc->lock, flags);
608 	index = find_next_bit(bitmap, ncf->n_vids, 0);
609 	if (index >= ncf->n_vids) {
610 		spin_unlock_irqrestore(&nc->lock, flags);
611 		return -1;
612 	}
613 	vid = ncf->vids[index];
614 
615 	clear_bit(index, bitmap);
616 	ncf->vids[index] = 0;
617 	spin_unlock_irqrestore(&nc->lock, flags);
618 
619 	nca->type = NCSI_PKT_CMD_SVF;
620 	nca->words[1] = vid;
621 	/* HW filter index starts at 1 */
622 	nca->bytes[6] = index + 1;
623 	nca->bytes[7] = 0x00;
624 	return 0;
625 }
626 
627 /* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
628  * packet.
629  */
630 static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
631 		       struct ncsi_cmd_arg *nca)
632 {
633 	struct ncsi_channel_vlan_filter *ncf;
634 	struct vlan_vid *vlan = NULL;
635 	unsigned long flags;
636 	int i, index;
637 	void *bitmap;
638 	u16 vid;
639 
640 	if (list_empty(&ndp->vlan_vids))
641 		return -1;
642 
643 	ncf = &nc->vlan_filter;
644 	bitmap = &ncf->bitmap;
645 
646 	spin_lock_irqsave(&nc->lock, flags);
647 
648 	rcu_read_lock();
649 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
650 		vid = vlan->vid;
651 		for (i = 0; i < ncf->n_vids; i++)
652 			if (ncf->vids[i] == vid) {
653 				vid = 0;
654 				break;
655 			}
656 		if (vid)
657 			break;
658 	}
659 	rcu_read_unlock();
660 
661 	if (!vid) {
662 		/* No outstanding VLAN ID to set */
663 		spin_unlock_irqrestore(&nc->lock, flags);
664 		return -1;
665 	}
666 
667 	index = find_next_zero_bit(bitmap, ncf->n_vids, 0);
668 	if (index < 0 || index >= ncf->n_vids) {
669 		netdev_err(ndp->ndev.dev,
670 			   "Channel %u already has all VLAN filters set\n",
671 			   nc->id);
672 		spin_unlock_irqrestore(&nc->lock, flags);
673 		return -1;
674 	}
675 
676 	ncf->vids[index] = vid;
677 	set_bit(index, bitmap);
678 	spin_unlock_irqrestore(&nc->lock, flags);
679 
680 	nca->type = NCSI_PKT_CMD_SVF;
681 	nca->words[1] = vid;
682 	/* HW filter index starts at 1 */
683 	nca->bytes[6] = index + 1;
684 	nca->bytes[7] = 0x01;
685 
686 	return 0;
687 }
688 
689 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
690 
691 /* NCSI OEM Command APIs */
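/* Both GMA handlers below build an OEM command whose payload starts with
 * the 4-byte manufacturer ID, followed by vendor-specific bytes; for the
 * Broadcom and Mellanox "Get MAC Address" commands the vendor command
 * code is carried in byte 5 of the payload.
 */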
692 static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
693 {
694 	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
695 	int ret = 0;
696 
697 	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
698 
699 	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
700 	*(unsigned int *)data = ntohl(NCSI_OEM_MFR_BCM_ID);
701 	data[5] = NCSI_OEM_BCM_CMD_GMA;
702 
703 	nca->data = data;
704 
705 	ret = ncsi_xmit_cmd(nca);
706 	if (ret)
707 		netdev_err(nca->ndp->ndev.dev,
708 			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
709 			   nca->type);
710 	return ret;
711 }
712 
713 static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
714 {
715 	union {
716 		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
717 		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
718 	} u;
719 	int ret = 0;
720 
721 	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
722 
723 	memset(&u, 0, sizeof(u));
724 	u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
725 	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
726 	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
727 
728 	nca->data = u.data_u8;
729 
730 	ret = ncsi_xmit_cmd(nca);
731 	if (ret)
732 		netdev_err(nca->ndp->ndev.dev,
733 			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
734 			   nca->type);
735 	return ret;
736 }
737 
738 /* OEM Command handlers initialization */
739 static struct ncsi_oem_gma_handler {
740 	unsigned int	mfr_id;
741 	int		(*handler)(struct ncsi_cmd_arg *nca);
742 } ncsi_oem_gma_handlers[] = {
743 	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
744 	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx }
745 };
746 
747 static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
748 {
749 	struct ncsi_oem_gma_handler *nch = NULL;
750 	int i;
751 
752 	/* This function should only be called once, return if flag set */
753 	if (nca->ndp->gma_flag == 1)
754 		return -1;
755 
756 	/* Find gma handler for given manufacturer id */
757 	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
758 		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
759 			if (ncsi_oem_gma_handlers[i].handler)
760 				nch = &ncsi_oem_gma_handlers[i];
761 			break;
762 		}
763 	}
764 
765 	if (!nch) {
766 		netdev_err(nca->ndp->ndev.dev,
767 			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
768 			   mf_id);
769 		return -1;
770 	}
771 
772 	/* Set the flag for GMA command which should only be called once */
773 	nca->ndp->gma_flag = 1;
774 
775 	/* Get MAC address from NCSI device */
776 	return nch->handler(nca);
777 }
778 
779 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
780 
781 /* Determine if a given channel from the channel_queue should be used for Tx */
782 static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
783 			       struct ncsi_channel *nc)
784 {
785 	struct ncsi_channel_mode *ncm;
786 	struct ncsi_channel *channel;
787 	struct ncsi_package *np;
788 
789 	/* Check if any other channel has Tx enabled; a channel may have already
790 	 * been configured and removed from the channel queue.
791 	 */
792 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
793 		if (!ndp->multi_package && np != nc->package)
794 			continue;
795 		NCSI_FOR_EACH_CHANNEL(np, channel) {
796 			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
797 			if (ncm->enable)
798 				return false;
799 		}
800 	}
801 
802 	/* A queued package's preferred channel with link always owns Tx */
803 	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
804 		np = channel->package;
805 		if (np->preferred_channel &&
806 		    ncsi_channel_has_link(np->preferred_channel)) {
807 			return np->preferred_channel == nc;
808 		}
809 	}
810 
811 	/* This channel has link */
812 	if (ncsi_channel_has_link(nc))
813 		return true;
814 
815 	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
816 		if (ncsi_channel_has_link(channel))
817 			return false;
818 
819 	/* No other channel has link; default to this one */
820 	return true;
821 }
822 
823 /* Change the active Tx channel in a multi-channel setup */
824 int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
825 			   struct ncsi_package *package,
826 			   struct ncsi_channel *disable,
827 			   struct ncsi_channel *enable)
828 {
829 	struct ncsi_cmd_arg nca;
830 	struct ncsi_channel *nc;
831 	struct ncsi_package *np;
832 	int ret = 0;
833 
834 	if (!package->multi_channel && !ndp->multi_package)
835 		netdev_warn(ndp->ndev.dev,
836 			    "NCSI: Trying to update Tx channel in single-channel mode\n");
837 	nca.ndp = ndp;
838 	nca.req_flags = 0;
839 
840 	/* Find current channel with Tx enabled */
841 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
842 		if (disable)
843 			break;
844 		if (!ndp->multi_package && np != package)
845 			continue;
846 
847 		NCSI_FOR_EACH_CHANNEL(np, nc)
848 			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
849 				disable = nc;
850 				break;
851 			}
852 	}
853 
854 	/* Find a suitable channel for Tx */
855 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
856 		if (enable)
857 			break;
858 		if (!ndp->multi_package && np != package)
859 			continue;
860 		if (!(ndp->package_whitelist & (0x1 << np->id)))
861 			continue;
862 
863 		if (np->preferred_channel &&
864 		    ncsi_channel_has_link(np->preferred_channel)) {
865 			enable = np->preferred_channel;
866 			break;
867 		}
868 
869 		NCSI_FOR_EACH_CHANNEL(np, nc) {
870 			if (!(np->channel_whitelist & 0x1 << nc->id))
871 				continue;
872 			if (nc->state != NCSI_CHANNEL_ACTIVE)
873 				continue;
874 			if (ncsi_channel_has_link(nc)) {
875 				enable = nc;
876 				break;
877 			}
878 		}
879 	}
880 
881 	if (disable == enable)
882 		return -1;
883 
884 	if (!enable)
885 		return -1;
886 
887 	if (disable) {
888 		nca.channel = disable->id;
889 		nca.package = disable->package->id;
890 		nca.type = NCSI_PKT_CMD_DCNT;
891 		ret = ncsi_xmit_cmd(&nca);
892 		if (ret)
893 			netdev_err(ndp->ndev.dev,
894 				   "Error %d sending DCNT\n",
895 				   ret);
896 	}
897 
898 	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
899 
900 	nca.channel = enable->id;
901 	nca.package = enable->package->id;
902 	nca.type = NCSI_PKT_CMD_ECNT;
903 	ret = ncsi_xmit_cmd(&nca);
904 	if (ret)
905 		netdev_err(ndp->ndev.dev,
906 			   "Error %d sending ECNT\n",
907 			   ret);
908 
909 	return ret;
910 }
911 
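/* Configuration state machine for the active channel: select the package
 * (SP), clear initial state (CIS), optionally fetch the MAC address via
 * an OEM command, clear and then program the VLAN filters, enable or
 * disable VLAN filtering (EV/DV), set the MAC address (SMA), enable
 * broadcast filtering (EBF) and, with IPv6 addresses present, global
 * multicast filtering (EGMF), enable channel network Tx when this channel
 * carries Tx (ECNT), enable the channel (EC), enable AENs if supported
 * (AE) and finish with a Get Link Status (GLS).
 */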
912 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
913 {
914 	struct ncsi_package *np = ndp->active_package;
915 	struct ncsi_channel *nc = ndp->active_channel;
916 	struct ncsi_channel *hot_nc = NULL;
917 	struct ncsi_dev *nd = &ndp->ndev;
918 	struct net_device *dev = nd->dev;
919 	struct ncsi_cmd_arg nca;
920 	unsigned char index;
921 	unsigned long flags;
922 	int ret;
923 
924 	nca.ndp = ndp;
925 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
926 	switch (nd->state) {
927 	case ncsi_dev_state_config:
928 	case ncsi_dev_state_config_sp:
929 		ndp->pending_req_num = 1;
930 
931 		/* Select the specific package */
932 		nca.type = NCSI_PKT_CMD_SP;
933 		if (ndp->flags & NCSI_DEV_HWA)
934 			nca.bytes[0] = 0;
935 		else
936 			nca.bytes[0] = 1;
937 		nca.package = np->id;
938 		nca.channel = NCSI_RESERVED_CHANNEL;
939 		ret = ncsi_xmit_cmd(&nca);
940 		if (ret) {
941 			netdev_err(ndp->ndev.dev,
942 				   "NCSI: Failed to transmit CMD_SP\n");
943 			goto error;
944 		}
945 
946 		nd->state = ncsi_dev_state_config_cis;
947 		break;
948 	case ncsi_dev_state_config_cis:
949 		ndp->pending_req_num = 1;
950 
951 		/* Clear initial state */
952 		nca.type = NCSI_PKT_CMD_CIS;
953 		nca.package = np->id;
954 		nca.channel = nc->id;
955 		ret = ncsi_xmit_cmd(&nca);
956 		if (ret) {
957 			netdev_err(ndp->ndev.dev,
958 				   "NCSI: Failed to transmit CMD_CIS\n");
959 			goto error;
960 		}
961 
962 		nd->state = ncsi_dev_state_config_oem_gma;
963 		break;
964 	case ncsi_dev_state_config_oem_gma:
965 		nd->state = ncsi_dev_state_config_clear_vids;
966 		ret = -1;
967 
968 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
969 		nca.type = NCSI_PKT_CMD_OEM;
970 		nca.package = np->id;
971 		nca.channel = nc->id;
972 		ndp->pending_req_num = 1;
973 		ret = ncsi_gma_handler(&nca, nc->version.mf_id);
974 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
975 
976 		if (ret < 0)
977 			schedule_work(&ndp->work);
978 
979 		break;
980 	case ncsi_dev_state_config_clear_vids:
981 	case ncsi_dev_state_config_svf:
982 	case ncsi_dev_state_config_ev:
983 	case ncsi_dev_state_config_sma:
984 	case ncsi_dev_state_config_ebf:
985 #if IS_ENABLED(CONFIG_IPV6)
986 	case ncsi_dev_state_config_egmf:
987 #endif
988 	case ncsi_dev_state_config_ecnt:
989 	case ncsi_dev_state_config_ec:
990 	case ncsi_dev_state_config_ae:
991 	case ncsi_dev_state_config_gls:
992 		ndp->pending_req_num = 1;
993 
994 		nca.package = np->id;
995 		nca.channel = nc->id;
996 
997 		/* Clear any active filters on the channel before setting */
998 		if (nd->state == ncsi_dev_state_config_clear_vids) {
999 			ret = clear_one_vid(ndp, nc, &nca);
1000 			if (ret) {
1001 				nd->state = ncsi_dev_state_config_svf;
1002 				schedule_work(&ndp->work);
1003 				break;
1004 			}
1005 			/* Repeat */
1006 			nd->state = ncsi_dev_state_config_clear_vids;
1007 		/* Add known VLAN tags to the filter */
1008 		} else if (nd->state == ncsi_dev_state_config_svf) {
1009 			ret = set_one_vid(ndp, nc, &nca);
1010 			if (ret) {
1011 				nd->state = ncsi_dev_state_config_ev;
1012 				schedule_work(&ndp->work);
1013 				break;
1014 			}
1015 			/* Repeat */
1016 			nd->state = ncsi_dev_state_config_svf;
1017 		/* Enable/Disable the VLAN filter */
1018 		} else if (nd->state == ncsi_dev_state_config_ev) {
1019 			if (list_empty(&ndp->vlan_vids)) {
1020 				nca.type = NCSI_PKT_CMD_DV;
1021 			} else {
1022 				nca.type = NCSI_PKT_CMD_EV;
1023 				nca.bytes[3] = NCSI_CAP_VLAN_NO;
1024 			}
1025 			nd->state = ncsi_dev_state_config_sma;
1026 		} else if (nd->state == ncsi_dev_state_config_sma) {
1027 			/* Use first entry in unicast filter table. Note that
1028 			 * the MAC filter table starts from entry 1 instead of
1029 			 * 0.
1030 			 */
1031 			nca.type = NCSI_PKT_CMD_SMA;
1032 			for (index = 0; index < 6; index++)
1033 				nca.bytes[index] = dev->dev_addr[index];
1034 			nca.bytes[6] = 0x1;
1035 			nca.bytes[7] = 0x1;
1036 			nd->state = ncsi_dev_state_config_ebf;
1037 		} else if (nd->state == ncsi_dev_state_config_ebf) {
1038 			nca.type = NCSI_PKT_CMD_EBF;
1039 			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
1040 			if (ncsi_channel_is_tx(ndp, nc))
1041 				nd->state = ncsi_dev_state_config_ecnt;
1042 			else
1043 				nd->state = ncsi_dev_state_config_ec;
1044 #if IS_ENABLED(CONFIG_IPV6)
1045 			if (ndp->inet6_addr_num > 0 &&
1046 			    (nc->caps[NCSI_CAP_GENERIC].cap &
1047 			     NCSI_CAP_GENERIC_MC))
1048 				nd->state = ncsi_dev_state_config_egmf;
1049 		} else if (nd->state == ncsi_dev_state_config_egmf) {
1050 			nca.type = NCSI_PKT_CMD_EGMF;
1051 			nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
1052 			if (ncsi_channel_is_tx(ndp, nc))
1053 				nd->state = ncsi_dev_state_config_ecnt;
1054 			else
1055 				nd->state = ncsi_dev_state_config_ec;
1056 #endif /* CONFIG_IPV6 */
1057 		} else if (nd->state == ncsi_dev_state_config_ecnt) {
1058 			if (np->preferred_channel &&
1059 			    nc != np->preferred_channel)
1060 				netdev_info(ndp->ndev.dev,
1061 					    "NCSI: Tx failed over to channel %u\n",
1062 					    nc->id);
1063 			nca.type = NCSI_PKT_CMD_ECNT;
1064 			nd->state = ncsi_dev_state_config_ec;
1065 		} else if (nd->state == ncsi_dev_state_config_ec) {
1066 			/* Enable AEN if it's supported */
1067 			nca.type = NCSI_PKT_CMD_EC;
1068 			nd->state = ncsi_dev_state_config_ae;
1069 			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
1070 				nd->state = ncsi_dev_state_config_gls;
1071 		} else if (nd->state == ncsi_dev_state_config_ae) {
1072 			nca.type = NCSI_PKT_CMD_AE;
1073 			nca.bytes[0] = 0;
1074 			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
1075 			nd->state = ncsi_dev_state_config_gls;
1076 		} else if (nd->state == ncsi_dev_state_config_gls) {
1077 			nca.type = NCSI_PKT_CMD_GLS;
1078 			nd->state = ncsi_dev_state_config_done;
1079 		}
1080 
1081 		ret = ncsi_xmit_cmd(&nca);
1082 		if (ret) {
1083 			netdev_err(ndp->ndev.dev,
1084 				   "NCSI: Failed to transmit CMD %x\n",
1085 				   nca.type);
1086 			goto error;
1087 		}
1088 		break;
1089 	case ncsi_dev_state_config_done:
1090 		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
1091 			   nc->id);
1092 		spin_lock_irqsave(&nc->lock, flags);
1093 		nc->state = NCSI_CHANNEL_ACTIVE;
1094 
1095 		if (ndp->flags & NCSI_DEV_RESET) {
1096 			/* A reset event happened during config, start it now */
1097 			nc->reconfigure_needed = false;
1098 			spin_unlock_irqrestore(&nc->lock, flags);
1099 			ncsi_reset_dev(nd);
1100 			break;
1101 		}
1102 
1103 		if (nc->reconfigure_needed) {
1104 			/* This channel's configuration has been updated
1105 			 * part-way during the config state - start the
1106 			 * channel configuration over
1107 			 */
1108 			nc->reconfigure_needed = false;
1109 			nc->state = NCSI_CHANNEL_INACTIVE;
1110 			spin_unlock_irqrestore(&nc->lock, flags);
1111 
1112 			spin_lock_irqsave(&ndp->lock, flags);
1113 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1114 			spin_unlock_irqrestore(&ndp->lock, flags);
1115 
1116 			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
1117 			ncsi_process_next_channel(ndp);
1118 			break;
1119 		}
1120 
1121 		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
1122 			hot_nc = nc;
1123 		} else {
1124 			hot_nc = NULL;
1125 			netdev_dbg(ndp->ndev.dev,
1126 				   "NCSI: channel %u link down after config\n",
1127 				   nc->id);
1128 		}
1129 		spin_unlock_irqrestore(&nc->lock, flags);
1130 
1131 		/* Update the hot channel */
1132 		spin_lock_irqsave(&ndp->lock, flags);
1133 		ndp->hot_channel = hot_nc;
1134 		spin_unlock_irqrestore(&ndp->lock, flags);
1135 
1136 		ncsi_start_channel_monitor(nc);
1137 		ncsi_process_next_channel(ndp);
1138 		break;
1139 	default:
1140 		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
1141 			     nd->state);
1142 	}
1143 
1144 	return;
1145 
1146 error:
1147 	ncsi_report_link(ndp, true);
1148 }
1149 
1150 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
1151 {
1152 	struct ncsi_channel *nc, *found, *hot_nc;
1153 	struct ncsi_channel_mode *ncm;
1154 	unsigned long flags, cflags;
1155 	struct ncsi_package *np;
1156 	bool with_link;
1157 
1158 	spin_lock_irqsave(&ndp->lock, flags);
1159 	hot_nc = ndp->hot_channel;
1160 	spin_unlock_irqrestore(&ndp->lock, flags);
1161 
1162 	/* By default the search stops once an inactive channel with link
1163 	 * up is found, unless a preferred channel is set.
1164 	 * If multi_package or multi_channel are configured all channels in the
1165 	 * whitelist are added to the channel queue.
1166 	 */
1167 	found = NULL;
1168 	with_link = false;
1169 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1170 		if (!(ndp->package_whitelist & (0x1 << np->id)))
1171 			continue;
1172 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1173 			if (!(np->channel_whitelist & (0x1 << nc->id)))
1174 				continue;
1175 
1176 			spin_lock_irqsave(&nc->lock, cflags);
1177 
1178 			if (!list_empty(&nc->link) ||
1179 			    nc->state != NCSI_CHANNEL_INACTIVE) {
1180 				spin_unlock_irqrestore(&nc->lock, cflags);
1181 				continue;
1182 			}
1183 
1184 			if (!found)
1185 				found = nc;
1186 
1187 			if (nc == hot_nc)
1188 				found = nc;
1189 
1190 			ncm = &nc->modes[NCSI_MODE_LINK];
1191 			if (ncm->data[2] & 0x1) {
1192 				found = nc;
1193 				with_link = true;
1194 			}
1195 
1196 			/* If multi_channel is enabled configure all valid
1197 			 * channels whether or not they currently have link
1198 			 * so they will have AENs enabled.
1199 			 */
1200 			if (with_link || np->multi_channel) {
1201 				spin_lock_irqsave(&ndp->lock, flags);
1202 				list_add_tail_rcu(&nc->link,
1203 						  &ndp->channel_queue);
1204 				spin_unlock_irqrestore(&ndp->lock, flags);
1205 
1206 				netdev_dbg(ndp->ndev.dev,
1207 					   "NCSI: Channel %u added to queue (link %s)\n",
1208 					   nc->id,
1209 					   ncm->data[2] & 0x1 ? "up" : "down");
1210 			}
1211 
1212 			spin_unlock_irqrestore(&nc->lock, cflags);
1213 
1214 			if (with_link && !np->multi_channel)
1215 				break;
1216 		}
1217 		if (with_link && !ndp->multi_package)
1218 			break;
1219 	}
1220 
1221 	if (list_empty(&ndp->channel_queue) && found) {
1222 		netdev_info(ndp->ndev.dev,
1223 			    "NCSI: No channel with link found, configuring channel %u\n",
1224 			    found->id);
1225 		spin_lock_irqsave(&ndp->lock, flags);
1226 		list_add_tail_rcu(&found->link, &ndp->channel_queue);
1227 		spin_unlock_irqrestore(&ndp->lock, flags);
1228 	} else if (!found) {
1229 		netdev_warn(ndp->ndev.dev,
1230 			    "NCSI: No channel found to configure!\n");
1231 		ncsi_report_link(ndp, true);
1232 		return -ENODEV;
1233 	}
1234 
1235 	return ncsi_process_next_channel(ndp);
1236 }
1237 
1238 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1239 {
1240 	struct ncsi_package *np;
1241 	struct ncsi_channel *nc;
1242 	unsigned int cap;
1243 	bool has_channel = false;
1244 
1245 	/* Hardware arbitration is disabled if any one channel doesn't
1246 	 * explicitly support it.
1247 	 */
1248 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1249 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1250 			has_channel = true;
1251 
1252 			cap = nc->caps[NCSI_CAP_GENERIC].cap;
1253 			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1254 			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1255 			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
1256 				ndp->flags &= ~NCSI_DEV_HWA;
1257 				return false;
1258 			}
1259 		}
1260 	}
1261 
1262 	if (has_channel) {
1263 		ndp->flags |= NCSI_DEV_HWA;
1264 		return true;
1265 	}
1266 
1267 	ndp->flags &= ~NCSI_DEV_HWA;
1268 	return false;
1269 }
1270 
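/* Package/channel enumeration state machine: deselect every possible
 * package, then, one package ID at a time, select it, clear the initial
 * state of its channels, retrieve their version (GVI), capabilities (GC)
 * and link status (GLS), and deselect it again. Once all eight package
 * IDs have been probed, hardware arbitration support is checked and an
 * active channel is chosen.
 */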
1271 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1272 {
1273 	struct ncsi_dev *nd = &ndp->ndev;
1274 	struct ncsi_package *np;
1275 	struct ncsi_channel *nc;
1276 	struct ncsi_cmd_arg nca;
1277 	unsigned char index;
1278 	int ret;
1279 
1280 	nca.ndp = ndp;
1281 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1282 	switch (nd->state) {
1283 	case ncsi_dev_state_probe:
1284 		nd->state = ncsi_dev_state_probe_deselect;
1285 		/* Fall through */
1286 	case ncsi_dev_state_probe_deselect:
1287 		ndp->pending_req_num = 8;
1288 
1289 		/* Deselect all possible packages */
1290 		nca.type = NCSI_PKT_CMD_DP;
1291 		nca.channel = NCSI_RESERVED_CHANNEL;
1292 		for (index = 0; index < 8; index++) {
1293 			nca.package = index;
1294 			ret = ncsi_xmit_cmd(&nca);
1295 			if (ret)
1296 				goto error;
1297 		}
1298 
1299 		nd->state = ncsi_dev_state_probe_package;
1300 		break;
1301 	case ncsi_dev_state_probe_package:
1302 		ndp->pending_req_num = 1;
1303 
1304 		nca.type = NCSI_PKT_CMD_SP;
1305 		nca.bytes[0] = 1;
1306 		nca.package = ndp->package_probe_id;
1307 		nca.channel = NCSI_RESERVED_CHANNEL;
1308 		ret = ncsi_xmit_cmd(&nca);
1309 		if (ret)
1310 			goto error;
1311 		nd->state = ncsi_dev_state_probe_channel;
1312 		break;
1313 	case ncsi_dev_state_probe_channel:
1314 		ndp->active_package = ncsi_find_package(ndp,
1315 							ndp->package_probe_id);
1316 		if (!ndp->active_package) {
1317 			/* No response */
1318 			nd->state = ncsi_dev_state_probe_dp;
1319 			schedule_work(&ndp->work);
1320 			break;
1321 		}
1322 		nd->state = ncsi_dev_state_probe_cis;
1323 		schedule_work(&ndp->work);
1324 		break;
1325 	case ncsi_dev_state_probe_cis:
1326 		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1327 
1328 		/* Clear initial state */
1329 		nca.type = NCSI_PKT_CMD_CIS;
1330 		nca.package = ndp->active_package->id;
1331 		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1332 			nca.channel = index;
1333 			ret = ncsi_xmit_cmd(&nca);
1334 			if (ret)
1335 				goto error;
1336 		}
1337 
1338 		nd->state = ncsi_dev_state_probe_gvi;
1339 		break;
1340 	case ncsi_dev_state_probe_gvi:
1341 	case ncsi_dev_state_probe_gc:
1342 	case ncsi_dev_state_probe_gls:
1343 		np = ndp->active_package;
1344 		ndp->pending_req_num = np->channel_num;
1345 
1346 		/* Retrieve version, capability or link status */
1347 		if (nd->state == ncsi_dev_state_probe_gvi)
1348 			nca.type = NCSI_PKT_CMD_GVI;
1349 		else if (nd->state == ncsi_dev_state_probe_gc)
1350 			nca.type = NCSI_PKT_CMD_GC;
1351 		else
1352 			nca.type = NCSI_PKT_CMD_GLS;
1353 
1354 		nca.package = np->id;
1355 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1356 			nca.channel = nc->id;
1357 			ret = ncsi_xmit_cmd(&nca);
1358 			if (ret)
1359 				goto error;
1360 		}
1361 
1362 		if (nd->state == ncsi_dev_state_probe_gvi)
1363 			nd->state = ncsi_dev_state_probe_gc;
1364 		else if (nd->state == ncsi_dev_state_probe_gc)
1365 			nd->state = ncsi_dev_state_probe_gls;
1366 		else
1367 			nd->state = ncsi_dev_state_probe_dp;
1368 		break;
1369 	case ncsi_dev_state_probe_dp:
1370 		ndp->pending_req_num = 1;
1371 
1372 		/* Deselect the current package */
1373 		nca.type = NCSI_PKT_CMD_DP;
1374 		nca.package = ndp->package_probe_id;
1375 		nca.channel = NCSI_RESERVED_CHANNEL;
1376 		ret = ncsi_xmit_cmd(&nca);
1377 		if (ret)
1378 			goto error;
1379 
1380 		/* Probe next package */
1381 		ndp->package_probe_id++;
1382 		if (ndp->package_probe_id >= 8) {
1383 			/* Probe finished */
1384 			ndp->flags |= NCSI_DEV_PROBED;
1385 			break;
1386 		}
1387 		nd->state = ncsi_dev_state_probe_package;
1388 		ndp->active_package = NULL;
1389 		break;
1390 	default:
1391 		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
1392 			    nd->state);
1393 	}
1394 
1395 	if (ndp->flags & NCSI_DEV_PROBED) {
1396 		/* Check if all packages have HWA support */
1397 		ncsi_check_hwa(ndp);
1398 		ncsi_choose_active_channel(ndp);
1399 	}
1400 
1401 	return;
1402 error:
1403 	netdev_err(ndp->ndev.dev,
1404 		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
1405 		   nca.type);
1406 	ncsi_report_link(ndp, true);
1407 }
1408 
1409 static void ncsi_dev_work(struct work_struct *work)
1410 {
1411 	struct ncsi_dev_priv *ndp = container_of(work,
1412 			struct ncsi_dev_priv, work);
1413 	struct ncsi_dev *nd = &ndp->ndev;
1414 
1415 	switch (nd->state & ncsi_dev_state_major) {
1416 	case ncsi_dev_state_probe:
1417 		ncsi_probe_channel(ndp);
1418 		break;
1419 	case ncsi_dev_state_suspend:
1420 		ncsi_suspend_channel(ndp);
1421 		break;
1422 	case ncsi_dev_state_config:
1423 		ncsi_configure_channel(ndp);
1424 		break;
1425 	default:
1426 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1427 			    nd->state);
1428 	}
1429 }
1430 
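/* Take the next channel off the channel queue and either configure it
 * (if it was inactive) or suspend it (if it was active). With an empty
 * queue, report the link state and, if a reshuffle was requested,
 * restart channel selection.
 */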
1431 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1432 {
1433 	struct ncsi_channel *nc;
1434 	int old_state;
1435 	unsigned long flags;
1436 
1437 	spin_lock_irqsave(&ndp->lock, flags);
1438 	nc = list_first_or_null_rcu(&ndp->channel_queue,
1439 				    struct ncsi_channel, link);
1440 	if (!nc) {
1441 		spin_unlock_irqrestore(&ndp->lock, flags);
1442 		goto out;
1443 	}
1444 
1445 	list_del_init(&nc->link);
1446 	spin_unlock_irqrestore(&ndp->lock, flags);
1447 
1448 	spin_lock_irqsave(&nc->lock, flags);
1449 	old_state = nc->state;
1450 	nc->state = NCSI_CHANNEL_INVISIBLE;
1451 	spin_unlock_irqrestore(&nc->lock, flags);
1452 
1453 	ndp->active_channel = nc;
1454 	ndp->active_package = nc->package;
1455 
1456 	switch (old_state) {
1457 	case NCSI_CHANNEL_INACTIVE:
1458 		ndp->ndev.state = ncsi_dev_state_config;
1459 		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
1460 			   nc->id);
1461 		ncsi_configure_channel(ndp);
1462 		break;
1463 	case NCSI_CHANNEL_ACTIVE:
1464 		ndp->ndev.state = ncsi_dev_state_suspend;
1465 		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1466 			   nc->id);
1467 		ncsi_suspend_channel(ndp);
1468 		break;
1469 	default:
1470 		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1471 			   old_state, nc->package->id, nc->id);
1472 		ncsi_report_link(ndp, false);
1473 		return -EINVAL;
1474 	}
1475 
1476 	return 0;
1477 
1478 out:
1479 	ndp->active_channel = NULL;
1480 	ndp->active_package = NULL;
1481 	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1482 		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1483 		return ncsi_choose_active_channel(ndp);
1484 	}
1485 
1486 	ncsi_report_link(ndp, false);
1487 	return -ENODEV;
1488 }
1489 
1490 #if IS_ENABLED(CONFIG_IPV6)
1491 static int ncsi_inet6addr_event(struct notifier_block *this,
1492 				unsigned long event, void *data)
1493 {
1494 	struct inet6_ifaddr *ifa = data;
1495 	struct net_device *dev = ifa->idev->dev;
1496 	struct ncsi_dev *nd = ncsi_find_dev(dev);
1497 	struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
1498 	struct ncsi_package *np;
1499 	struct ncsi_channel *nc;
1500 	struct ncsi_cmd_arg nca;
1501 	bool action;
1502 	int ret;
1503 
1504 	if (!ndp || (ipv6_addr_type(&ifa->addr) &
1505 	    (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
1506 		return NOTIFY_OK;
1507 
1508 	switch (event) {
1509 	case NETDEV_UP:
1510 		action = (++ndp->inet6_addr_num) == 1;
1511 		nca.type = NCSI_PKT_CMD_EGMF;
1512 		break;
1513 	case NETDEV_DOWN:
1514 		action = (--ndp->inet6_addr_num == 0);
1515 		nca.type = NCSI_PKT_CMD_DGMF;
1516 		break;
1517 	default:
1518 		return NOTIFY_OK;
1519 	}
1520 
1521 	/* We might not have an active channel or package yet. The
1522 	 * required IPv6 multicast filter will be enabled once an active
1523 	 * channel or package has been chosen.
1524 	 */
1525 	np = ndp->active_package;
1526 	nc = ndp->active_channel;
1527 	if (!action || !np || !nc)
1528 		return NOTIFY_OK;
1529 
1530 	/* We needn't enable or disable it if the function isn't supported */
1531 	if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
1532 		return NOTIFY_OK;
1533 
1534 	nca.ndp = ndp;
1535 	nca.req_flags = 0;
1536 	nca.package = np->id;
1537 	nca.channel = nc->id;
1538 	nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
1539 	ret = ncsi_xmit_cmd(&nca);
1540 	if (ret) {
1541 		netdev_warn(dev, "Fail to %s global multicast filter (%d)\n",
1542 			    (event == NETDEV_UP) ? "enable" : "disable", ret);
1543 		return NOTIFY_DONE;
1544 	}
1545 
1546 	return NOTIFY_OK;
1547 }
1548 
1549 static struct notifier_block ncsi_inet6addr_notifier = {
1550 	.notifier_call = ncsi_inet6addr_event,
1551 };
1552 #endif /* CONFIG_IPV6 */
1553 
1554 static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1555 {
1556 	struct ncsi_dev *nd = &ndp->ndev;
1557 	struct ncsi_channel *nc;
1558 	struct ncsi_package *np;
1559 	unsigned long flags;
1560 	unsigned int n = 0;
1561 
1562 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1563 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1564 			spin_lock_irqsave(&nc->lock, flags);
1565 
1566 			/* Channels may be busy, so mark them dirty instead of
1567 			 * kicking them if:
1568 			 * a) not ACTIVE (configured)
1569 			 * b) in the channel_queue (to be configured)
1570 			 * c) their ndev is in the config state
1571 			 */
1572 			if (nc->state != NCSI_CHANNEL_ACTIVE) {
1573 				if ((ndp->ndev.state & 0xff00) ==
1574 						ncsi_dev_state_config ||
1575 						!list_empty(&nc->link)) {
1576 					netdev_dbg(nd->dev,
1577 						   "NCSI: channel %p marked dirty\n",
1578 						   nc);
1579 					nc->reconfigure_needed = true;
1580 				}
1581 				spin_unlock_irqrestore(&nc->lock, flags);
1582 				continue;
1583 			}
1584 
1585 			spin_unlock_irqrestore(&nc->lock, flags);
1586 
1587 			ncsi_stop_channel_monitor(nc);
1588 			spin_lock_irqsave(&nc->lock, flags);
1589 			nc->state = NCSI_CHANNEL_INACTIVE;
1590 			spin_unlock_irqrestore(&nc->lock, flags);
1591 
1592 			spin_lock_irqsave(&ndp->lock, flags);
1593 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1594 			spin_unlock_irqrestore(&ndp->lock, flags);
1595 
1596 			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
1597 			n++;
1598 		}
1599 	}
1600 
1601 	return n;
1602 }
1603 
1604 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1605 {
1606 	struct ncsi_dev_priv *ndp;
1607 	unsigned int n_vids = 0;
1608 	struct vlan_vid *vlan;
1609 	struct ncsi_dev *nd;
1610 	bool found = false;
1611 
1612 	if (vid == 0)
1613 		return 0;
1614 
1615 	nd = ncsi_find_dev(dev);
1616 	if (!nd) {
1617 		netdev_warn(dev, "NCSI: No net_device?\n");
1618 		return 0;
1619 	}
1620 
1621 	ndp = TO_NCSI_DEV_PRIV(nd);
1622 
1623 	/* Add the VLAN id to our internal list */
1624 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1625 		n_vids++;
1626 		if (vlan->vid == vid) {
1627 			netdev_dbg(dev, "NCSI: vid %u already registered\n",
1628 				   vid);
1629 			return 0;
1630 		}
1631 	}
1632 	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1633 		netdev_warn(dev,
1634 			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
1635 			    vid, NCSI_MAX_VLAN_VIDS);
1636 		return -ENOSPC;
1637 	}
1638 
1639 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1640 	if (!vlan)
1641 		return -ENOMEM;
1642 
1643 	vlan->proto = proto;
1644 	vlan->vid = vid;
1645 	list_add_rcu(&vlan->list, &ndp->vlan_vids);
1646 
1647 	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
1648 
1649 	found = ncsi_kick_channels(ndp) != 0;
1650 
1651 	return found ? ncsi_process_next_channel(ndp) : 0;
1652 }
1653 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1654 
1655 int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1656 {
1657 	struct vlan_vid *vlan, *tmp;
1658 	struct ncsi_dev_priv *ndp;
1659 	struct ncsi_dev *nd;
1660 	bool found = false;
1661 
1662 	if (vid == 0)
1663 		return 0;
1664 
1665 	nd = ncsi_find_dev(dev);
1666 	if (!nd) {
1667 		netdev_warn(dev, "NCSI: no net_device?\n");
1668 		return 0;
1669 	}
1670 
1671 	ndp = TO_NCSI_DEV_PRIV(nd);
1672 
1673 	/* Remove the VLAN id from our internal list */
1674 	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1675 		if (vlan->vid == vid) {
1676 			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
1677 			list_del_rcu(&vlan->list);
1678 			found = true;
1679 			kfree(vlan);
1680 		}
1681 
1682 	if (!found) {
1683 		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
1684 		return -EINVAL;
1685 	}
1686 
1687 	found = ncsi_kick_channels(ndp) != 0;
1688 
1689 	return found ? ncsi_process_next_channel(ndp) : 0;
1690 }
1691 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
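/* Illustrative sketch, not part of this file: a NIC driver whose interface
 * is managed over NCSI would typically wire the two helpers above straight
 * into its net_device_ops so that VLAN registrations reach the management
 * controller; "example_netdev_ops" is a hypothetical name:
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		.ndo_vlan_rx_add_vid	= ncsi_vlan_rx_add_vid,
 *		.ndo_vlan_rx_kill_vid	= ncsi_vlan_rx_kill_vid,
 *	};
 */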
1692 
1693 struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1694 				   void (*handler)(struct ncsi_dev *ndev))
1695 {
1696 	struct ncsi_dev_priv *ndp;
1697 	struct ncsi_dev *nd;
1698 	unsigned long flags;
1699 	int i;
1700 
1701 	/* Check if the device has been registered or not */
1702 	nd = ncsi_find_dev(dev);
1703 	if (nd)
1704 		return nd;
1705 
1706 	/* Create NCSI device */
1707 	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1708 	if (!ndp)
1709 		return NULL;
1710 
1711 	nd = &ndp->ndev;
1712 	nd->state = ncsi_dev_state_registered;
1713 	nd->dev = dev;
1714 	nd->handler = handler;
1715 	ndp->pending_req_num = 0;
1716 	INIT_LIST_HEAD(&ndp->channel_queue);
1717 	INIT_LIST_HEAD(&ndp->vlan_vids);
1718 	INIT_WORK(&ndp->work, ncsi_dev_work);
1719 	ndp->package_whitelist = UINT_MAX;
1720 
1721 	/* Initialize private NCSI device */
1722 	spin_lock_init(&ndp->lock);
1723 	INIT_LIST_HEAD(&ndp->packages);
1724 	ndp->request_id = NCSI_REQ_START_IDX;
1725 	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1726 		ndp->requests[i].id = i;
1727 		ndp->requests[i].ndp = ndp;
1728 		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
1729 	}
1730 
1731 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1732 #if IS_ENABLED(CONFIG_IPV6)
1733 	ndp->inet6_addr_num = 0;
1734 	if (list_empty(&ncsi_dev_list))
1735 		register_inet6addr_notifier(&ncsi_inet6addr_notifier);
1736 #endif
1737 	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1738 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1739 
1740 	/* Register NCSI packet Rx handler */
1741 	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1742 	ndp->ptype.func = ncsi_rcv_rsp;
1743 	ndp->ptype.dev = dev;
1744 	dev_add_pack(&ndp->ptype);
1745 
1746 	/* Set up generic netlink interface */
1747 	ncsi_init_netlink(dev);
1748 
1749 	return nd;
1750 }
1751 EXPORT_SYMBOL_GPL(ncsi_register_dev);
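/* Illustrative sketch, not part of this file: a typical consumer registers
 * an NCSI device with a link handler at probe time and drives it from its
 * open/stop paths; the example_* names and the netdev variable below are
 * hypothetical:
 *
 *	static void example_ncsi_handler(struct ncsi_dev *nd)
 *	{
 *		if (nd->state != ncsi_dev_state_functional)
 *			return;
 *		netdev_info(nd->dev, "NCSI link %s\n",
 *			    nd->link_up ? "up" : "down");
 *	}
 *
 *	probe:    nd = ncsi_register_dev(netdev, example_ncsi_handler);
 *	ndo_open: ncsi_start_dev(nd);
 *	ndo_stop: ncsi_stop_dev(nd);
 *	remove:   ncsi_unregister_dev(nd);
 */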
1752 
1753 int ncsi_start_dev(struct ncsi_dev *nd)
1754 {
1755 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1756 
1757 	if (nd->state != ncsi_dev_state_registered &&
1758 	    nd->state != ncsi_dev_state_functional)
1759 		return -ENOTTY;
1760 
1761 	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1762 		ndp->package_probe_id = 0;
1763 		nd->state = ncsi_dev_state_probe;
1764 		schedule_work(&ndp->work);
1765 		return 0;
1766 	}
1767 
1768 	return ncsi_reset_dev(nd);
1769 }
1770 EXPORT_SYMBOL_GPL(ncsi_start_dev);
1771 
1772 void ncsi_stop_dev(struct ncsi_dev *nd)
1773 {
1774 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1775 	struct ncsi_package *np;
1776 	struct ncsi_channel *nc;
1777 	bool chained;
1778 	int old_state;
1779 	unsigned long flags;
1780 
1781 	/* Stop the channel monitor on any active channels. Don't reset the
1782 	 * channel state so we know which were active when ncsi_start_dev()
1783 	 * is next called.
1784 	 */
1785 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1786 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1787 			ncsi_stop_channel_monitor(nc);
1788 
1789 			spin_lock_irqsave(&nc->lock, flags);
1790 			chained = !list_empty(&nc->link);
1791 			old_state = nc->state;
1792 			spin_unlock_irqrestore(&nc->lock, flags);
1793 
1794 			WARN_ON_ONCE(chained ||
1795 				     old_state == NCSI_CHANNEL_INVISIBLE);
1796 		}
1797 	}
1798 
1799 	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
1800 	ncsi_report_link(ndp, true);
1801 }
1802 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1803 
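/* Suspend any currently-active channel and then re-run channel selection.
 * If a suspend or configuration sequence is already in flight, this only
 * sets NCSI_DEV_RESET and lets that sequence pick up the reset once it
 * completes.
 */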
1804 int ncsi_reset_dev(struct ncsi_dev *nd)
1805 {
1806 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1807 	struct ncsi_channel *nc, *active, *tmp;
1808 	struct ncsi_package *np;
1809 	unsigned long flags;
1810 
1811 	spin_lock_irqsave(&ndp->lock, flags);
1812 
1813 	if (!(ndp->flags & NCSI_DEV_RESET)) {
1814 		/* A reset hasn't been started yet, check the current state */
1815 		switch (nd->state & ncsi_dev_state_major) {
1816 		case ncsi_dev_state_registered:
1817 		case ncsi_dev_state_probe:
1818 			/* Not even probed yet - do nothing */
1819 			spin_unlock_irqrestore(&ndp->lock, flags);
1820 			return 0;
1821 		case ncsi_dev_state_suspend:
1822 		case ncsi_dev_state_config:
1823 			/* Wait for the channel to finish its suspend/config
1824 			 * operation; once it finishes it will check for
1825 			 * NCSI_DEV_RESET and reset the state.
1826 			 */
1827 			ndp->flags |= NCSI_DEV_RESET;
1828 			spin_unlock_irqrestore(&ndp->lock, flags);
1829 			return 0;
1830 		}
1831 	} else {
1832 		switch (nd->state) {
1833 		case ncsi_dev_state_suspend_done:
1834 		case ncsi_dev_state_config_done:
1835 		case ncsi_dev_state_functional:
1836 			/* Ok */
1837 			break;
1838 		default:
1839 			/* A reset operation is currently in progress */
1840 			spin_unlock_irqrestore(&ndp->lock, flags);
1841 			return 0;
1842 		}
1843 	}
1844 
1845 	if (!list_empty(&ndp->channel_queue)) {
1846 		/* Clear any channel queue we may have interrupted */
1847 		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
1848 			list_del_init(&nc->link);
1849 	}
1850 	spin_unlock_irqrestore(&ndp->lock, flags);
1851 
1852 	active = NULL;
1853 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1854 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1855 			spin_lock_irqsave(&nc->lock, flags);
1856 
1857 			if (nc->state == NCSI_CHANNEL_ACTIVE) {
1858 				active = nc;
1859 				nc->state = NCSI_CHANNEL_INVISIBLE;
1860 				spin_unlock_irqrestore(&nc->lock, flags);
1861 				ncsi_stop_channel_monitor(nc);
1862 				break;
1863 			}
1864 
1865 			spin_unlock_irqrestore(&nc->lock, flags);
1866 		}
1867 		if (active)
1868 			break;
1869 	}
1870 
1871 	if (!active) {
1872 		/* Done */
1873 		spin_lock_irqsave(&ndp->lock, flags);
1874 		ndp->flags &= ~NCSI_DEV_RESET;
1875 		spin_unlock_irqrestore(&ndp->lock, flags);
1876 		return ncsi_choose_active_channel(ndp);
1877 	}
1878 
1879 	spin_lock_irqsave(&ndp->lock, flags);
1880 	ndp->flags |= NCSI_DEV_RESET;
1881 	ndp->active_channel = active;
1882 	ndp->active_package = active->package;
1883 	spin_unlock_irqrestore(&ndp->lock, flags);
1884 
1885 	nd->state = ncsi_dev_state_suspend;
1886 	schedule_work(&ndp->work);
1887 	return 0;
1888 }
1889 
1890 void ncsi_unregister_dev(struct ncsi_dev *nd)
1891 {
1892 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1893 	struct ncsi_package *np, *tmp;
1894 	unsigned long flags;
1895 
1896 	dev_remove_pack(&ndp->ptype);
1897 
1898 	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1899 		ncsi_remove_package(np);
1900 
1901 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1902 	list_del_rcu(&ndp->node);
1903 #if IS_ENABLED(CONFIG_IPV6)
1904 	if (list_empty(&ncsi_dev_list))
1905 		unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
1906 #endif
1907 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1908 
1909 	ncsi_unregister_netlink(nd->dev);
1910 
1911 	kfree(ndp);
1912 }
1913 EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
1914