xref: /openbmc/linux/net/ncsi/ncsi-manage.c (revision e5c86679)
1 /*
2  * Copyright Gavin Shan, IBM Corporation 2016.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 #include <linux/netlink.h>
16 
17 #include <net/ncsi.h>
18 #include <net/net_namespace.h>
19 #include <net/sock.h>
20 #include <net/addrconf.h>
21 #include <net/ipv6.h>
22 #include <net/if_inet6.h>
23 
24 #include "internal.h"
25 #include "ncsi-pkt.h"
26 
27 LIST_HEAD(ncsi_dev_list);
28 DEFINE_SPINLOCK(ncsi_dev_lock);
29 
30 static inline int ncsi_filter_size(int table)
31 {
32 	int sizes[] = { 2, 6, 6, 6 };
33 
34 	BUILD_BUG_ON(ARRAY_SIZE(sizes) != NCSI_FILTER_MAX);
35 	if (table < NCSI_FILTER_BASE || table >= NCSI_FILTER_MAX)
36 		return -EINVAL;
37 
38 	return sizes[table];
39 }
40 
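/* Look up @data in the specified filter table of @nc. Returns the
 * entry index on success, -ENXIO if the table hasn't been allocated,
 * or -ENOENT if no matching entry exists.
 */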
41 int ncsi_find_filter(struct ncsi_channel *nc, int table, void *data)
42 {
43 	struct ncsi_channel_filter *ncf;
44 	void *bitmap;
45 	int index, size;
46 	unsigned long flags;
47 
48 	ncf = nc->filters[table];
49 	if (!ncf)
50 		return -ENXIO;
51 
52 	size = ncsi_filter_size(table);
53 	if (size < 0)
54 		return size;
55 
56 	spin_lock_irqsave(&nc->lock, flags);
57 	bitmap = (void *)&ncf->bitmap;
58 	index = -1;
59 	while ((index = find_next_bit(bitmap, ncf->total, index + 1))
60 	       < ncf->total) {
61 		if (!memcmp(ncf->data + size * index, data, size)) {
62 			spin_unlock_irqrestore(&nc->lock, flags);
63 			return index;
64 		}
65 	}
66 	spin_unlock_irqrestore(&nc->lock, flags);
67 
68 	return -ENOENT;
69 }
70 
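/* Add @data to the specified filter table of @nc, reusing an existing
 * entry if one already matches. Returns the entry index on success,
 * -ENODEV if the table doesn't exist, or -ENOSPC if the table is full.
 */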
71 int ncsi_add_filter(struct ncsi_channel *nc, int table, void *data)
72 {
73 	struct ncsi_channel_filter *ncf;
74 	int index, size;
75 	void *bitmap;
76 	unsigned long flags;
77 
78 	size = ncsi_filter_size(table);
79 	if (size < 0)
80 		return size;
81 
82 	index = ncsi_find_filter(nc, table, data);
83 	if (index >= 0)
84 		return index;
85 
86 	ncf = nc->filters[table];
87 	if (!ncf)
88 		return -ENODEV;
89 
90 	spin_lock_irqsave(&nc->lock, flags);
91 	bitmap = (void *)&ncf->bitmap;
92 	do {
93 		index = find_next_zero_bit(bitmap, ncf->total, 0);
94 		if (index >= ncf->total) {
95 			spin_unlock_irqrestore(&nc->lock, flags);
96 			return -ENOSPC;
97 		}
98 	} while (test_and_set_bit(index, bitmap));
99 
100 	memcpy(ncf->data + size * index, data, size);
101 	spin_unlock_irqrestore(&nc->lock, flags);
102 
103 	return index;
104 }
105 
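/* Clear the filter table entry at @index and zero its data. Returns 0
 * on success or a negative errno if the table or index is invalid.
 */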
106 int ncsi_remove_filter(struct ncsi_channel *nc, int table, int index)
107 {
108 	struct ncsi_channel_filter *ncf;
109 	int size;
110 	void *bitmap;
111 	unsigned long flags;
112 
113 	size = ncsi_filter_size(table);
114 	if (size < 0)
115 		return size;
116 
117 	ncf = nc->filters[table];
118 	if (!ncf || index >= ncf->total)
119 		return -ENODEV;
120 
121 	spin_lock_irqsave(&nc->lock, flags);
122 	bitmap = (void *)&ncf->bitmap;
123 	if (test_and_clear_bit(index, bitmap))
124 		memset(ncf->data + size * index, 0, size);
125 	spin_unlock_irqrestore(&nc->lock, flags);
126 
127 	return 0;
128 }
129 
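/* Report the aggregated link state to the upper layer through the
 * device's handler. The link is reported as up if any active channel
 * has link up, unless @force_down forces a link-down report.
 */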
130 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
131 {
132 	struct ncsi_dev *nd = &ndp->ndev;
133 	struct ncsi_package *np;
134 	struct ncsi_channel *nc;
135 	unsigned long flags;
136 
137 	nd->state = ncsi_dev_state_functional;
138 	if (force_down) {
139 		nd->link_up = 0;
140 		goto report;
141 	}
142 
143 	nd->link_up = 0;
144 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
145 		NCSI_FOR_EACH_CHANNEL(np, nc) {
146 			spin_lock_irqsave(&nc->lock, flags);
147 
148 			if (!list_empty(&nc->link) ||
149 			    nc->state != NCSI_CHANNEL_ACTIVE) {
150 				spin_unlock_irqrestore(&nc->lock, flags);
151 				continue;
152 			}
153 
154 			if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
155 				spin_unlock_irqrestore(&nc->lock, flags);
156 				nd->link_up = 1;
157 				goto report;
158 			}
159 
160 			spin_unlock_irqrestore(&nc->lock, flags);
161 		}
162 	}
163 
164 report:
165 	nd->handler(nd);
166 }
167 
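/* Timer handler that periodically polls an active channel with a Get
 * Link Status (GLS) command. If too many polls go unanswered, the
 * channel is marked inactive and requeued for reconfiguration.
 */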
168 static void ncsi_channel_monitor(unsigned long data)
169 {
170 	struct ncsi_channel *nc = (struct ncsi_channel *)data;
171 	struct ncsi_package *np = nc->package;
172 	struct ncsi_dev_priv *ndp = np->ndp;
173 	struct ncsi_cmd_arg nca;
174 	bool enabled, chained;
175 	unsigned int monitor_state;
176 	unsigned long flags;
177 	int state, ret;
178 
179 	spin_lock_irqsave(&nc->lock, flags);
180 	state = nc->state;
181 	chained = !list_empty(&nc->link);
182 	enabled = nc->monitor.enabled;
183 	monitor_state = nc->monitor.state;
184 	spin_unlock_irqrestore(&nc->lock, flags);
185 
186 	if (!enabled || chained)
187 		return;
188 	if (state != NCSI_CHANNEL_INACTIVE &&
189 	    state != NCSI_CHANNEL_ACTIVE)
190 		return;
191 
192 	switch (monitor_state) {
193 	case NCSI_CHANNEL_MONITOR_START:
194 	case NCSI_CHANNEL_MONITOR_RETRY:
195 		nca.ndp = ndp;
196 		nca.package = np->id;
197 		nca.channel = nc->id;
198 		nca.type = NCSI_PKT_CMD_GLS;
199 		nca.req_flags = 0;
200 		ret = ncsi_xmit_cmd(&nca);
201 		if (ret) {
202 			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
203 				   ret);
204 			return;
205 		}
206 
207 		break;
208 	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
209 		break;
210 	default:
211 		if (!(ndp->flags & NCSI_DEV_HWA) &&
212 		    state == NCSI_CHANNEL_ACTIVE) {
213 			ncsi_report_link(ndp, true);
214 			ndp->flags |= NCSI_DEV_RESHUFFLE;
215 		}
216 
217 		spin_lock_irqsave(&nc->lock, flags);
218 		nc->state = NCSI_CHANNEL_INVISIBLE;
219 		spin_unlock_irqrestore(&nc->lock, flags);
220 
221 		spin_lock_irqsave(&ndp->lock, flags);
222 		nc->state = NCSI_CHANNEL_INACTIVE;
223 		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
224 		spin_unlock_irqrestore(&ndp->lock, flags);
225 		ncsi_process_next_channel(ndp);
226 		return;
227 	}
228 
229 	spin_lock_irqsave(&nc->lock, flags);
230 	nc->monitor.state++;
231 	spin_unlock_irqrestore(&nc->lock, flags);
232 	mod_timer(&nc->monitor.timer, jiffies + HZ);
233 }
234 
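/* Arm the per-channel monitor timer so the channel's link status is
 * polled once per second.
 */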
235 void ncsi_start_channel_monitor(struct ncsi_channel *nc)
236 {
237 	unsigned long flags;
238 
239 	spin_lock_irqsave(&nc->lock, flags);
240 	WARN_ON_ONCE(nc->monitor.enabled);
241 	nc->monitor.enabled = true;
242 	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
243 	spin_unlock_irqrestore(&nc->lock, flags);
244 
245 	mod_timer(&nc->monitor.timer, jiffies + HZ);
246 }
247 
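/* Disable the per-channel monitor and synchronously stop its timer. */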
248 void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
249 {
250 	unsigned long flags;
251 
252 	spin_lock_irqsave(&nc->lock, flags);
253 	if (!nc->monitor.enabled) {
254 		spin_unlock_irqrestore(&nc->lock, flags);
255 		return;
256 	}
257 	nc->monitor.enabled = false;
258 	spin_unlock_irqrestore(&nc->lock, flags);
259 
260 	del_timer_sync(&nc->monitor.timer);
261 }
262 
263 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
264 				       unsigned char id)
265 {
266 	struct ncsi_channel *nc;
267 
268 	NCSI_FOR_EACH_CHANNEL(np, nc) {
269 		if (nc->id == id)
270 			return nc;
271 	}
272 
273 	return NULL;
274 }
275 
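/* Allocate and initialize a channel with the given @id under package
 * @np. If the channel already exists, the existing one is returned and
 * the newly allocated structure is freed.
 */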
276 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
277 {
278 	struct ncsi_channel *nc, *tmp;
279 	int index;
280 	unsigned long flags;
281 
282 	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
283 	if (!nc)
284 		return NULL;
285 
286 	nc->id = id;
287 	nc->package = np;
288 	nc->state = NCSI_CHANNEL_INACTIVE;
289 	nc->monitor.enabled = false;
290 	setup_timer(&nc->monitor.timer,
291 		    ncsi_channel_monitor, (unsigned long)nc);
292 	spin_lock_init(&nc->lock);
293 	INIT_LIST_HEAD(&nc->link);
294 	for (index = 0; index < NCSI_CAP_MAX; index++)
295 		nc->caps[index].index = index;
296 	for (index = 0; index < NCSI_MODE_MAX; index++)
297 		nc->modes[index].index = index;
298 
299 	spin_lock_irqsave(&np->lock, flags);
300 	tmp = ncsi_find_channel(np, id);
301 	if (tmp) {
302 		spin_unlock_irqrestore(&np->lock, flags);
303 		kfree(nc);
304 		return tmp;
305 	}
306 
307 	list_add_tail_rcu(&nc->node, &np->channels);
308 	np->channel_num++;
309 	spin_unlock_irqrestore(&np->lock, flags);
310 
311 	return nc;
312 }
313 
314 static void ncsi_remove_channel(struct ncsi_channel *nc)
315 {
316 	struct ncsi_package *np = nc->package;
317 	struct ncsi_channel_filter *ncf;
318 	unsigned long flags;
319 	int i;
320 
321 	/* Release filters */
322 	spin_lock_irqsave(&nc->lock, flags);
323 	for (i = 0; i < NCSI_FILTER_MAX; i++) {
324 		ncf = nc->filters[i];
325 		if (!ncf)
326 			continue;
327 
328 		nc->filters[i] = NULL;
329 		kfree(ncf);
330 	}
331 
332 	nc->state = NCSI_CHANNEL_INACTIVE;
333 	spin_unlock_irqrestore(&nc->lock, flags);
334 	ncsi_stop_channel_monitor(nc);
335 
336 	/* Remove and free channel */
337 	spin_lock_irqsave(&np->lock, flags);
338 	list_del_rcu(&nc->node);
339 	np->channel_num--;
340 	spin_unlock_irqrestore(&np->lock, flags);
341 
342 	kfree(nc);
343 }
344 
345 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
346 				       unsigned char id)
347 {
348 	struct ncsi_package *np;
349 
350 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
351 		if (np->id == id)
352 			return np;
353 	}
354 
355 	return NULL;
356 }
357 
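/* Allocate and initialize a package with the given @id on device @ndp.
 * If the package already exists, the existing one is returned and the
 * newly allocated structure is freed.
 */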
358 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
359 				      unsigned char id)
360 {
361 	struct ncsi_package *np, *tmp;
362 	unsigned long flags;
363 
364 	np = kzalloc(sizeof(*np), GFP_ATOMIC);
365 	if (!np)
366 		return NULL;
367 
368 	np->id = id;
369 	np->ndp = ndp;
370 	spin_lock_init(&np->lock);
371 	INIT_LIST_HEAD(&np->channels);
372 
373 	spin_lock_irqsave(&ndp->lock, flags);
374 	tmp = ncsi_find_package(ndp, id);
375 	if (tmp) {
376 		spin_unlock_irqrestore(&ndp->lock, flags);
377 		kfree(np);
378 		return tmp;
379 	}
380 
381 	list_add_tail_rcu(&np->node, &ndp->packages);
382 	ndp->package_num++;
383 	spin_unlock_irqrestore(&ndp->lock, flags);
384 
385 	return np;
386 }
387 
388 void ncsi_remove_package(struct ncsi_package *np)
389 {
390 	struct ncsi_dev_priv *ndp = np->ndp;
391 	struct ncsi_channel *nc, *tmp;
392 	unsigned long flags;
393 
394 	/* Release all child channels */
395 	list_for_each_entry_safe(nc, tmp, &np->channels, node)
396 		ncsi_remove_channel(nc);
397 
398 	/* Remove and free package */
399 	spin_lock_irqsave(&ndp->lock, flags);
400 	list_del_rcu(&np->node);
401 	ndp->package_num--;
402 	spin_unlock_irqrestore(&ndp->lock, flags);
403 
404 	kfree(np);
405 }
406 
407 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
408 				   unsigned char id,
409 				   struct ncsi_package **np,
410 				   struct ncsi_channel **nc)
411 {
412 	struct ncsi_package *p;
413 	struct ncsi_channel *c;
414 
415 	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
416 	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
417 
418 	if (np)
419 		*np = p;
420 	if (nc)
421 		*nc = c;
422 }
423 
424 /* For two consecutive NCSI commands, the packet IDs shouldn't be
425  * the same. Otherwise, a stale response might be matched to the wrong
426  * command. So the available IDs are allocated in round-robin fashion.
427  */
428 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
429 					unsigned int req_flags)
430 {
431 	struct ncsi_request *nr = NULL;
432 	int i, limit = ARRAY_SIZE(ndp->requests);
433 	unsigned long flags;
434 
435 	/* Check for an available request from the current cursor to the ceiling */
436 	spin_lock_irqsave(&ndp->lock, flags);
437 	for (i = ndp->request_id; i < limit; i++) {
438 		if (ndp->requests[i].used)
439 			continue;
440 
441 		nr = &ndp->requests[i];
442 		nr->used = true;
443 		nr->flags = req_flags;
444 		ndp->request_id = i + 1;
445 		goto found;
446 	}
447 
448 	/* Fall back to searching from the starting index */
449 	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
450 		if (ndp->requests[i].used)
451 			continue;
452 
453 		nr = &ndp->requests[i];
454 		nr->used = true;
455 		nr->flags = req_flags;
456 		ndp->request_id = i + 1;
457 		goto found;
458 	}
459 
460 found:
461 	spin_unlock_irqrestore(&ndp->lock, flags);
462 	return nr;
463 }
464 
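/* Return a request to the free pool, releasing its command and
 * response skbs. For event-driven requests, the device work is
 * scheduled once the last pending request completes.
 */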
465 void ncsi_free_request(struct ncsi_request *nr)
466 {
467 	struct ncsi_dev_priv *ndp = nr->ndp;
468 	struct sk_buff *cmd, *rsp;
469 	unsigned long flags;
470 	bool driven;
471 
472 	if (nr->enabled) {
473 		nr->enabled = false;
474 		del_timer_sync(&nr->timer);
475 	}
476 
477 	spin_lock_irqsave(&ndp->lock, flags);
478 	cmd = nr->cmd;
479 	rsp = nr->rsp;
480 	nr->cmd = NULL;
481 	nr->rsp = NULL;
482 	nr->used = false;
483 	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
484 	spin_unlock_irqrestore(&ndp->lock, flags);
485 
486 	if (driven && cmd && --ndp->pending_req_num == 0)
487 		schedule_work(&ndp->work);
488 
489 	/* Release command and response */
490 	consume_skb(cmd);
491 	consume_skb(rsp);
492 }
493 
494 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
495 {
496 	struct ncsi_dev_priv *ndp;
497 
498 	NCSI_FOR_EACH_DEV(ndp) {
499 		if (ndp->ndev.dev == dev)
500 			return &ndp->ndev;
501 	}
502 
503 	return NULL;
504 }
505 
506 static void ncsi_request_timeout(unsigned long data)
507 {
508 	struct ncsi_request *nr = (struct ncsi_request *)data;
509 	struct ncsi_dev_priv *ndp = nr->ndp;
510 	unsigned long flags;
511 
512 	/* If the request already has an associated response,
513 	 * let the response handler release it.
514 	 */
515 	spin_lock_irqsave(&ndp->lock, flags);
516 	nr->enabled = false;
517 	if (nr->rsp || !nr->cmd) {
518 		spin_unlock_irqrestore(&ndp->lock, flags);
519 		return;
520 	}
521 	spin_unlock_irqrestore(&ndp->lock, flags);
522 
523 	/* Release the request */
524 	ncsi_free_request(nr);
525 }
526 
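/* State machine that suspends the active channel: select its package,
 * optionally refresh the link states of sibling channels, disable the
 * channel's network TX, disable the channel, and finally deselect the
 * package.
 */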
527 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
528 {
529 	struct ncsi_dev *nd = &ndp->ndev;
530 	struct ncsi_package *np = ndp->active_package;
531 	struct ncsi_channel *nc = ndp->active_channel;
532 	struct ncsi_cmd_arg nca;
533 	unsigned long flags;
534 	int ret;
535 
536 	nca.ndp = ndp;
537 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
538 	switch (nd->state) {
539 	case ncsi_dev_state_suspend:
540 		nd->state = ncsi_dev_state_suspend_select;
541 		/* Fall through */
542 	case ncsi_dev_state_suspend_select:
543 		ndp->pending_req_num = 1;
544 
545 		nca.type = NCSI_PKT_CMD_SP;
546 		nca.package = np->id;
547 		nca.channel = NCSI_RESERVED_CHANNEL;
548 		if (ndp->flags & NCSI_DEV_HWA)
549 			nca.bytes[0] = 0;
550 		else
551 			nca.bytes[0] = 1;
552 
553 		/* Retrieve the latest link states of the channels in the
554 		 * current package when the active channel needs to fail
555 		 * over to another one, since we will possibly select a
556 		 * different channel as the next active one. The link state
557 		 * is the most important factor in that selection, so we
558 		 * need accurate link states. Unfortunately, the link states
559 		 * of inactive channels can't be updated by LSC AENs in time.
560 		 */
561 		if (ndp->flags & NCSI_DEV_RESHUFFLE)
562 			nd->state = ncsi_dev_state_suspend_gls;
563 		else
564 			nd->state = ncsi_dev_state_suspend_dcnt;
565 		ret = ncsi_xmit_cmd(&nca);
566 		if (ret)
567 			goto error;
568 
569 		break;
570 	case ncsi_dev_state_suspend_gls:
571 		ndp->pending_req_num = np->channel_num;
572 
573 		nca.type = NCSI_PKT_CMD_GLS;
574 		nca.package = np->id;
575 
576 		nd->state = ncsi_dev_state_suspend_dcnt;
577 		NCSI_FOR_EACH_CHANNEL(np, nc) {
578 			nca.channel = nc->id;
579 			ret = ncsi_xmit_cmd(&nca);
580 			if (ret)
581 				goto error;
582 		}
583 
584 		break;
585 	case ncsi_dev_state_suspend_dcnt:
586 		ndp->pending_req_num = 1;
587 
588 		nca.type = NCSI_PKT_CMD_DCNT;
589 		nca.package = np->id;
590 		nca.channel = nc->id;
591 
592 		nd->state = ncsi_dev_state_suspend_dc;
593 		ret = ncsi_xmit_cmd(&nca);
594 		if (ret)
595 			goto error;
596 
597 		break;
598 	case ncsi_dev_state_suspend_dc:
599 		ndp->pending_req_num = 1;
600 
601 		nca.type = NCSI_PKT_CMD_DC;
602 		nca.package = np->id;
603 		nca.channel = nc->id;
604 		nca.bytes[0] = 1;
605 
606 		nd->state = ncsi_dev_state_suspend_deselect;
607 		ret = ncsi_xmit_cmd(&nca);
608 		if (ret)
609 			goto error;
610 
611 		break;
612 	case ncsi_dev_state_suspend_deselect:
613 		ndp->pending_req_num = 1;
614 
615 		nca.type = NCSI_PKT_CMD_DP;
616 		nca.package = np->id;
617 		nca.channel = NCSI_RESERVED_CHANNEL;
618 
619 		nd->state = ncsi_dev_state_suspend_done;
620 		ret = ncsi_xmit_cmd(&nca);
621 		if (ret)
622 			goto error;
623 
624 		break;
625 	case ncsi_dev_state_suspend_done:
626 		spin_lock_irqsave(&nc->lock, flags);
627 		nc->state = NCSI_CHANNEL_INACTIVE;
628 		spin_unlock_irqrestore(&nc->lock, flags);
629 		ncsi_process_next_channel(ndp);
630 
631 		break;
632 	default:
633 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
634 			    nd->state);
635 	}
636 
637 	return;
638 error:
639 	nd->state = ncsi_dev_state_functional;
640 }
641 
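/* State machine that brings up the active channel: select the package,
 * clear the channel's initial state, program the MAC address and
 * broadcast/multicast filters, enable network TX and the channel,
 * enable AENs if supported, and query the link status before marking
 * the channel active.
 */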
642 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
643 {
644 	struct ncsi_dev *nd = &ndp->ndev;
645 	struct net_device *dev = nd->dev;
646 	struct ncsi_package *np = ndp->active_package;
647 	struct ncsi_channel *nc = ndp->active_channel;
648 	struct ncsi_channel *hot_nc = NULL;
649 	struct ncsi_cmd_arg nca;
650 	unsigned char index;
651 	unsigned long flags;
652 	int ret;
653 
654 	nca.ndp = ndp;
655 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
656 	switch (nd->state) {
657 	case ncsi_dev_state_config:
658 	case ncsi_dev_state_config_sp:
659 		ndp->pending_req_num = 1;
660 
661 		/* Select the specific package */
662 		nca.type = NCSI_PKT_CMD_SP;
663 		if (ndp->flags & NCSI_DEV_HWA)
664 			nca.bytes[0] = 0;
665 		else
666 			nca.bytes[0] = 1;
667 		nca.package = np->id;
668 		nca.channel = NCSI_RESERVED_CHANNEL;
669 		ret = ncsi_xmit_cmd(&nca);
670 		if (ret)
671 			goto error;
672 
673 		nd->state = ncsi_dev_state_config_cis;
674 		break;
675 	case ncsi_dev_state_config_cis:
676 		ndp->pending_req_num = 1;
677 
678 		/* Clear initial state */
679 		nca.type = NCSI_PKT_CMD_CIS;
680 		nca.package = np->id;
681 		nca.channel = nc->id;
682 		ret = ncsi_xmit_cmd(&nca);
683 		if (ret)
684 			goto error;
685 
686 		nd->state = ncsi_dev_state_config_sma;
687 		break;
688 	case ncsi_dev_state_config_sma:
689 	case ncsi_dev_state_config_ebf:
690 #if IS_ENABLED(CONFIG_IPV6)
691 	case ncsi_dev_state_config_egmf:
692 #endif
693 	case ncsi_dev_state_config_ecnt:
694 	case ncsi_dev_state_config_ec:
695 	case ncsi_dev_state_config_ae:
696 	case ncsi_dev_state_config_gls:
697 		ndp->pending_req_num = 1;
698 
699 		nca.package = np->id;
700 		nca.channel = nc->id;
701 
702 		/* Use the first entry in the unicast filter table. Note
703 		 * that the MAC filter table starts from entry 1 instead
704 		 * of 0.
705 		 */
706 		if (nd->state == ncsi_dev_state_config_sma) {
707 			nca.type = NCSI_PKT_CMD_SMA;
708 			for (index = 0; index < 6; index++)
709 				nca.bytes[index] = dev->dev_addr[index];
710 			nca.bytes[6] = 0x1;
711 			nca.bytes[7] = 0x1;
712 			nd->state = ncsi_dev_state_config_ebf;
713 		} else if (nd->state == ncsi_dev_state_config_ebf) {
714 			nca.type = NCSI_PKT_CMD_EBF;
715 			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
716 			nd->state = ncsi_dev_state_config_ecnt;
717 #if IS_ENABLED(CONFIG_IPV6)
718 			if (ndp->inet6_addr_num > 0 &&
719 			    (nc->caps[NCSI_CAP_GENERIC].cap &
720 			     NCSI_CAP_GENERIC_MC))
721 				nd->state = ncsi_dev_state_config_egmf;
722 			else
723 				nd->state = ncsi_dev_state_config_ecnt;
724 		} else if (nd->state == ncsi_dev_state_config_egmf) {
725 			nca.type = NCSI_PKT_CMD_EGMF;
726 			nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
727 			nd->state = ncsi_dev_state_config_ecnt;
728 #endif /* CONFIG_IPV6 */
729 		} else if (nd->state == ncsi_dev_state_config_ecnt) {
730 			nca.type = NCSI_PKT_CMD_ECNT;
731 			nd->state = ncsi_dev_state_config_ec;
732 		} else if (nd->state == ncsi_dev_state_config_ec) {
733 			/* Enable AEN if it's supported */
734 			nca.type = NCSI_PKT_CMD_EC;
735 			nd->state = ncsi_dev_state_config_ae;
736 			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
737 				nd->state = ncsi_dev_state_config_gls;
738 		} else if (nd->state == ncsi_dev_state_config_ae) {
739 			nca.type = NCSI_PKT_CMD_AE;
740 			nca.bytes[0] = 0;
741 			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
742 			nd->state = ncsi_dev_state_config_gls;
743 		} else if (nd->state == ncsi_dev_state_config_gls) {
744 			nca.type = NCSI_PKT_CMD_GLS;
745 			nd->state = ncsi_dev_state_config_done;
746 		}
747 
748 		ret = ncsi_xmit_cmd(&nca);
749 		if (ret)
750 			goto error;
751 		break;
752 	case ncsi_dev_state_config_done:
753 		spin_lock_irqsave(&nc->lock, flags);
754 		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
755 			hot_nc = nc;
756 			nc->state = NCSI_CHANNEL_ACTIVE;
757 		} else {
758 			hot_nc = NULL;
759 			nc->state = NCSI_CHANNEL_INACTIVE;
760 		}
761 		spin_unlock_irqrestore(&nc->lock, flags);
762 
763 		/* Update the hot channel */
764 		spin_lock_irqsave(&ndp->lock, flags);
765 		ndp->hot_channel = hot_nc;
766 		spin_unlock_irqrestore(&ndp->lock, flags);
767 
768 		ncsi_start_channel_monitor(nc);
769 		ncsi_process_next_channel(ndp);
770 		break;
771 	default:
772 		netdev_warn(dev, "Wrong NCSI state 0x%x in config\n",
773 			    nd->state);
774 	}
775 
776 	return;
777 
778 error:
779 	ncsi_report_link(ndp, true);
780 }
781 
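/* Pick the next channel to configure: prefer an inactive channel with
 * link up, then the previous "hot" channel, then any inactive channel.
 * The chosen channel is queued and processing is kicked off.
 */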
782 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
783 {
784 	struct ncsi_package *np;
785 	struct ncsi_channel *nc, *found, *hot_nc;
786 	struct ncsi_channel_mode *ncm;
787 	unsigned long flags;
788 
789 	spin_lock_irqsave(&ndp->lock, flags);
790 	hot_nc = ndp->hot_channel;
791 	spin_unlock_irqrestore(&ndp->lock, flags);
792 
793 	/* The search is done once an inactive channel with link
794 	 * up is found.
795 	 */
796 	found = NULL;
797 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
798 		NCSI_FOR_EACH_CHANNEL(np, nc) {
799 			spin_lock_irqsave(&nc->lock, flags);
800 
801 			if (!list_empty(&nc->link) ||
802 			    nc->state != NCSI_CHANNEL_INACTIVE) {
803 				spin_unlock_irqrestore(&nc->lock, flags);
804 				continue;
805 			}
806 
807 			if (!found)
808 				found = nc;
809 
810 			if (nc == hot_nc)
811 				found = nc;
812 
813 			ncm = &nc->modes[NCSI_MODE_LINK];
814 			if (ncm->data[2] & 0x1) {
815 				spin_unlock_irqrestore(&nc->lock, flags);
816 				found = nc;
817 				goto out;
818 			}
819 
820 			spin_unlock_irqrestore(&nc->lock, flags);
821 		}
822 	}
823 
824 	if (!found) {
825 		ncsi_report_link(ndp, true);
826 		return -ENODEV;
827 	}
828 
829 out:
830 	spin_lock_irqsave(&ndp->lock, flags);
831 	list_add_tail_rcu(&found->link, &ndp->channel_queue);
832 	spin_unlock_irqrestore(&ndp->lock, flags);
833 
834 	return ncsi_process_next_channel(ndp);
835 }
836 
837 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
838 {
839 	struct ncsi_package *np;
840 	struct ncsi_channel *nc;
841 	unsigned int cap;
842 
843 	/* Hardware arbitration is disabled if any channel doesn't
844 	 * explicitly support it.
845 	 */
846 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
847 		NCSI_FOR_EACH_CHANNEL(np, nc) {
848 			cap = nc->caps[NCSI_CAP_GENERIC].cap;
849 			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
850 			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
851 			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
852 				ndp->flags &= ~NCSI_DEV_HWA;
853 				return false;
854 			}
855 		}
856 	}
857 
858 	ndp->flags |= NCSI_DEV_HWA;
859 	return true;
860 }
861 
862 static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
863 {
864 	struct ncsi_package *np;
865 	struct ncsi_channel *nc;
866 	unsigned long flags;
867 
868 	/* Move all available channels to processing queue */
869 	spin_lock_irqsave(&ndp->lock, flags);
870 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
871 		NCSI_FOR_EACH_CHANNEL(np, nc) {
872 			WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
873 				     !list_empty(&nc->link));
874 			ncsi_stop_channel_monitor(nc);
875 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
876 		}
877 	}
878 	spin_unlock_irqrestore(&ndp->lock, flags);
879 
880 	/* We may have no channels at all in extreme cases */
881 	if (list_empty(&ndp->channel_queue)) {
882 		ncsi_report_link(ndp, false);
883 		return -ENOENT;
884 	}
885 
886 	return ncsi_process_next_channel(ndp);
887 }
888 
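/* State machine that enumerates packages and channels: deselect all
 * packages, probe which packages respond, then for each package clear
 * the channels' initial state and retrieve their version, capabilities
 * and link status. Once done, hardware arbitration or a single active
 * channel is chosen.
 */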
889 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
890 {
891 	struct ncsi_dev *nd = &ndp->ndev;
892 	struct ncsi_package *np;
893 	struct ncsi_channel *nc;
894 	struct ncsi_cmd_arg nca;
895 	unsigned char index;
896 	int ret;
897 
898 	nca.ndp = ndp;
899 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
900 	switch (nd->state) {
901 	case ncsi_dev_state_probe:
902 		nd->state = ncsi_dev_state_probe_deselect;
903 		/* Fall through */
904 	case ncsi_dev_state_probe_deselect:
905 		ndp->pending_req_num = 8;
906 
907 		/* Deselect all possible packages */
908 		nca.type = NCSI_PKT_CMD_DP;
909 		nca.channel = NCSI_RESERVED_CHANNEL;
910 		for (index = 0; index < 8; index++) {
911 			nca.package = index;
912 			ret = ncsi_xmit_cmd(&nca);
913 			if (ret)
914 				goto error;
915 		}
916 
917 		nd->state = ncsi_dev_state_probe_package;
918 		break;
919 	case ncsi_dev_state_probe_package:
920 		ndp->pending_req_num = 16;
921 
922 		/* Select all possible packages */
923 		nca.type = NCSI_PKT_CMD_SP;
924 		nca.bytes[0] = 1;
925 		nca.channel = NCSI_RESERVED_CHANNEL;
926 		for (index = 0; index < 8; index++) {
927 			nca.package = index;
928 			ret = ncsi_xmit_cmd(&nca);
929 			if (ret)
930 				goto error;
931 		}
932 
933 		/* Deselect all possible packages again */
934 		nca.type = NCSI_PKT_CMD_DP;
935 		for (index = 0; index < 8; index++) {
936 			nca.package = index;
937 			ret = ncsi_xmit_cmd(&nca);
938 			if (ret)
939 				goto error;
940 		}
941 
942 		nd->state = ncsi_dev_state_probe_channel;
943 		break;
944 	case ncsi_dev_state_probe_channel:
945 		if (!ndp->active_package)
946 			ndp->active_package = list_first_or_null_rcu(
947 				&ndp->packages, struct ncsi_package, node);
948 		else if (list_is_last(&ndp->active_package->node,
949 				      &ndp->packages))
950 			ndp->active_package = NULL;
951 		else
952 			ndp->active_package = list_next_entry(
953 				ndp->active_package, node);
954 
955 		/* All available packages and channels have been enumerated.
956 		 * The enumeration happens only once, when the NCSI interface
957 		 * is started, so we need to continue bringing up the
958 		 * interface after the enumeration.
959 		 *
960 		 * We have to choose an active channel before configuring it.
961 		 * Note that we may not have an active channel in extreme
962 		 * situations.
963 		 */
964 		if (!ndp->active_package) {
965 			ndp->flags |= NCSI_DEV_PROBED;
966 			if (ncsi_check_hwa(ndp))
967 				ncsi_enable_hwa(ndp);
968 			else
969 				ncsi_choose_active_channel(ndp);
970 			return;
971 		}
972 
973 		/* Select the active package */
974 		ndp->pending_req_num = 1;
975 		nca.type = NCSI_PKT_CMD_SP;
976 		nca.bytes[0] = 1;
977 		nca.package = ndp->active_package->id;
978 		nca.channel = NCSI_RESERVED_CHANNEL;
979 		ret = ncsi_xmit_cmd(&nca);
980 		if (ret)
981 			goto error;
982 
983 		nd->state = ncsi_dev_state_probe_cis;
984 		break;
985 	case ncsi_dev_state_probe_cis:
986 		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
987 
988 		/* Clear initial state */
989 		nca.type = NCSI_PKT_CMD_CIS;
990 		nca.package = ndp->active_package->id;
991 		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
992 			nca.channel = index;
993 			ret = ncsi_xmit_cmd(&nca);
994 			if (ret)
995 				goto error;
996 		}
997 
998 		nd->state = ncsi_dev_state_probe_gvi;
999 		break;
1000 	case ncsi_dev_state_probe_gvi:
1001 	case ncsi_dev_state_probe_gc:
1002 	case ncsi_dev_state_probe_gls:
1003 		np = ndp->active_package;
1004 		ndp->pending_req_num = np->channel_num;
1005 
1006 		/* Retrieve version, capability or link status */
1007 		if (nd->state == ncsi_dev_state_probe_gvi)
1008 			nca.type = NCSI_PKT_CMD_GVI;
1009 		else if (nd->state == ncsi_dev_state_probe_gc)
1010 			nca.type = NCSI_PKT_CMD_GC;
1011 		else
1012 			nca.type = NCSI_PKT_CMD_GLS;
1013 
1014 		nca.package = np->id;
1015 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1016 			nca.channel = nc->id;
1017 			ret = ncsi_xmit_cmd(&nca);
1018 			if (ret)
1019 				goto error;
1020 		}
1021 
1022 		if (nd->state == ncsi_dev_state_probe_gvi)
1023 			nd->state = ncsi_dev_state_probe_gc;
1024 		else if (nd->state == ncsi_dev_state_probe_gc)
1025 			nd->state = ncsi_dev_state_probe_gls;
1026 		else
1027 			nd->state = ncsi_dev_state_probe_dp;
1028 		break;
1029 	case ncsi_dev_state_probe_dp:
1030 		ndp->pending_req_num = 1;
1031 
1032 		/* Deselect the active package */
1033 		nca.type = NCSI_PKT_CMD_DP;
1034 		nca.package = ndp->active_package->id;
1035 		nca.channel = NCSI_RESERVED_CHANNEL;
1036 		ret = ncsi_xmit_cmd(&nca);
1037 		if (ret)
1038 			goto error;
1039 
1040 		/* Scan channels in next package */
1041 		nd->state = ncsi_dev_state_probe_channel;
1042 		break;
1043 	default:
1044 		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
1045 			    nd->state);
1046 	}
1047 
1048 	return;
1049 error:
1050 	ncsi_report_link(ndp, true);
1051 }
1052 
1053 static void ncsi_dev_work(struct work_struct *work)
1054 {
1055 	struct ncsi_dev_priv *ndp = container_of(work,
1056 			struct ncsi_dev_priv, work);
1057 	struct ncsi_dev *nd = &ndp->ndev;
1058 
1059 	switch (nd->state & ncsi_dev_state_major) {
1060 	case ncsi_dev_state_probe:
1061 		ncsi_probe_channel(ndp);
1062 		break;
1063 	case ncsi_dev_state_suspend:
1064 		ncsi_suspend_channel(ndp);
1065 		break;
1066 	case ncsi_dev_state_config:
1067 		ncsi_configure_channel(ndp);
1068 		break;
1069 	default:
1070 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1071 			    nd->state);
1072 	}
1073 }
1074 
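/* Dequeue the next channel from the processing queue and either
 * configure it (if inactive) or suspend it (if active). With an empty
 * queue, either reshuffle the channels or report the link state.
 */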
1075 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1076 {
1077 	struct ncsi_channel *nc;
1078 	int old_state;
1079 	unsigned long flags;
1080 
1081 	spin_lock_irqsave(&ndp->lock, flags);
1082 	nc = list_first_or_null_rcu(&ndp->channel_queue,
1083 				    struct ncsi_channel, link);
1084 	if (!nc) {
1085 		spin_unlock_irqrestore(&ndp->lock, flags);
1086 		goto out;
1087 	}
1088 
1089 	list_del_init(&nc->link);
1090 	spin_unlock_irqrestore(&ndp->lock, flags);
1091 
1092 	spin_lock_irqsave(&nc->lock, flags);
1093 	old_state = nc->state;
1094 	nc->state = NCSI_CHANNEL_INVISIBLE;
1095 	spin_unlock_irqrestore(&nc->lock, flags);
1096 
1097 	ndp->active_channel = nc;
1098 	ndp->active_package = nc->package;
1099 
1100 	switch (old_state) {
1101 	case NCSI_CHANNEL_INACTIVE:
1102 		ndp->ndev.state = ncsi_dev_state_config;
1103 		ncsi_configure_channel(ndp);
1104 		break;
1105 	case NCSI_CHANNEL_ACTIVE:
1106 		ndp->ndev.state = ncsi_dev_state_suspend;
1107 		ncsi_suspend_channel(ndp);
1108 		break;
1109 	default:
1110 		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1111 			   old_state, nc->package->id, nc->id);
1112 		ncsi_report_link(ndp, false);
1113 		return -EINVAL;
1114 	}
1115 
1116 	return 0;
1117 
1118 out:
1119 	ndp->active_channel = NULL;
1120 	ndp->active_package = NULL;
1121 	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1122 		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1123 		return ncsi_choose_active_channel(ndp);
1124 	}
1125 
1126 	ncsi_report_link(ndp, false);
1127 	return -ENODEV;
1128 }
1129 
1130 #if IS_ENABLED(CONFIG_IPV6)
1131 static int ncsi_inet6addr_event(struct notifier_block *this,
1132 				unsigned long event, void *data)
1133 {
1134 	struct inet6_ifaddr *ifa = data;
1135 	struct net_device *dev = ifa->idev->dev;
1136 	struct ncsi_dev *nd = ncsi_find_dev(dev);
1137 	struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
1138 	struct ncsi_package *np;
1139 	struct ncsi_channel *nc;
1140 	struct ncsi_cmd_arg nca;
1141 	bool action;
1142 	int ret;
1143 
1144 	if (!ndp || (ipv6_addr_type(&ifa->addr) &
1145 	    (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
1146 		return NOTIFY_OK;
1147 
1148 	switch (event) {
1149 	case NETDEV_UP:
1150 		action = (++ndp->inet6_addr_num) == 1;
1151 		nca.type = NCSI_PKT_CMD_EGMF;
1152 		break;
1153 	case NETDEV_DOWN:
1154 		action = (--ndp->inet6_addr_num == 0);
1155 		nca.type = NCSI_PKT_CMD_DGMF;
1156 		break;
1157 	default:
1158 		return NOTIFY_OK;
1159 	}
1160 
1161 	/* We might not have an active channel or package yet. The
1162 	 * multicast filter required for IPv6 will be enabled when an
1163 	 * active channel or package is chosen.
1164 	 */
1165 	np = ndp->active_package;
1166 	nc = ndp->active_channel;
1167 	if (!action || !np || !nc)
1168 		return NOTIFY_OK;
1169 
1170 	/* No need to enable or disable it if the function isn't supported */
1171 	if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
1172 		return NOTIFY_OK;
1173 
1174 	nca.ndp = ndp;
1175 	nca.req_flags = 0;
1176 	nca.package = np->id;
1177 	nca.channel = nc->id;
1178 	nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
1179 	ret = ncsi_xmit_cmd(&nca);
1180 	if (ret) {
1181 		netdev_warn(dev, "Fail to %s global multicast filter (%d)\n",
1182 			    (event == NETDEV_UP) ? "enable" : "disable", ret);
1183 		return NOTIFY_DONE;
1184 	}
1185 
1186 	return NOTIFY_OK;
1187 }
1188 
1189 static struct notifier_block ncsi_inet6addr_notifier = {
1190 	.notifier_call = ncsi_inet6addr_event,
1191 };
1192 #endif /* CONFIG_IPV6 */
1193 
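/* Create and register the NCSI device for @dev, initializing its
 * request table, registering the IPv6 address notifier when needed and
 * hooking up the NCSI response packet handler. Returns the existing
 * device if @dev has already been registered.
 */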
1194 struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1195 				   void (*handler)(struct ncsi_dev *ndev))
1196 {
1197 	struct ncsi_dev_priv *ndp;
1198 	struct ncsi_dev *nd;
1199 	unsigned long flags;
1200 	int i;
1201 
1202 	/* Check if the device has been registered or not */
1203 	nd = ncsi_find_dev(dev);
1204 	if (nd)
1205 		return nd;
1206 
1207 	/* Create NCSI device */
1208 	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1209 	if (!ndp)
1210 		return NULL;
1211 
1212 	nd = &ndp->ndev;
1213 	nd->state = ncsi_dev_state_registered;
1214 	nd->dev = dev;
1215 	nd->handler = handler;
1216 	ndp->pending_req_num = 0;
1217 	INIT_LIST_HEAD(&ndp->channel_queue);
1218 	INIT_WORK(&ndp->work, ncsi_dev_work);
1219 
1220 	/* Initialize private NCSI device */
1221 	spin_lock_init(&ndp->lock);
1222 	INIT_LIST_HEAD(&ndp->packages);
1223 	ndp->request_id = NCSI_REQ_START_IDX;
1224 	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1225 		ndp->requests[i].id = i;
1226 		ndp->requests[i].ndp = ndp;
1227 		setup_timer(&ndp->requests[i].timer,
1228 			    ncsi_request_timeout,
1229 			    (unsigned long)&ndp->requests[i]);
1230 	}
1231 
1232 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1233 #if IS_ENABLED(CONFIG_IPV6)
1234 	ndp->inet6_addr_num = 0;
1235 	if (list_empty(&ncsi_dev_list))
1236 		register_inet6addr_notifier(&ncsi_inet6addr_notifier);
1237 #endif
1238 	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1239 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1240 
1241 	/* Register NCSI packet Rx handler */
1242 	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1243 	ndp->ptype.func = ncsi_rcv_rsp;
1244 	ndp->ptype.dev = dev;
1245 	dev_add_pack(&ndp->ptype);
1246 
1247 	return nd;
1248 }
1249 EXPORT_SYMBOL_GPL(ncsi_register_dev);
1250 
1251 int ncsi_start_dev(struct ncsi_dev *nd)
1252 {
1253 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1254 	int ret;
1255 
1256 	if (nd->state != ncsi_dev_state_registered &&
1257 	    nd->state != ncsi_dev_state_functional)
1258 		return -ENOTTY;
1259 
1260 	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1261 		nd->state = ncsi_dev_state_probe;
1262 		schedule_work(&ndp->work);
1263 		return 0;
1264 	}
1265 
1266 	if (ndp->flags & NCSI_DEV_HWA)
1267 		ret = ncsi_enable_hwa(ndp);
1268 	else
1269 		ret = ncsi_choose_active_channel(ndp);
1270 
1271 	return ret;
1272 }
1273 EXPORT_SYMBOL_GPL(ncsi_start_dev);
1274 
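/* Stop the NCSI device: halt all channel monitors, mark every channel
 * inactive and report link down to the upper layer.
 */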
1275 void ncsi_stop_dev(struct ncsi_dev *nd)
1276 {
1277 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1278 	struct ncsi_package *np;
1279 	struct ncsi_channel *nc;
1280 	bool chained;
1281 	int old_state;
1282 	unsigned long flags;
1283 
1284 	/* Stop the channel monitor and reset channel's state */
1285 	/* Stop the channel monitors and reset the channels' states */
1286 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1287 			ncsi_stop_channel_monitor(nc);
1288 
1289 			spin_lock_irqsave(&nc->lock, flags);
1290 			chained = !list_empty(&nc->link);
1291 			old_state = nc->state;
1292 			nc->state = NCSI_CHANNEL_INACTIVE;
1293 			spin_unlock_irqrestore(&nc->lock, flags);
1294 
1295 			WARN_ON_ONCE(chained ||
1296 				     old_state == NCSI_CHANNEL_INVISIBLE);
1297 		}
1298 	}
1299 
1300 	ncsi_report_link(ndp, true);
1301 }
1302 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1303 
1304 void ncsi_unregister_dev(struct ncsi_dev *nd)
1305 {
1306 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1307 	struct ncsi_package *np, *tmp;
1308 	unsigned long flags;
1309 
1310 	dev_remove_pack(&ndp->ptype);
1311 
1312 	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1313 		ncsi_remove_package(np);
1314 
1315 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1316 	list_del_rcu(&ndp->node);
1317 #if IS_ENABLED(CONFIG_IPV6)
1318 	if (list_empty(&ncsi_dev_list))
1319 		unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
1320 #endif
1321 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1322 
1323 	kfree(ndp);
1324 }
1325 EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
1326