xref: /openbmc/linux/net/ncsi/ncsi-manage.c (revision b593bce5)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright Gavin Shan, IBM Corporation 2016.
4  */
5 
6 #include <linux/module.h>
7 #include <linux/kernel.h>
8 #include <linux/init.h>
9 #include <linux/netdevice.h>
10 #include <linux/skbuff.h>
11 
12 #include <net/ncsi.h>
13 #include <net/net_namespace.h>
14 #include <net/sock.h>
15 #include <net/addrconf.h>
16 #include <net/ipv6.h>
17 #include <net/if_inet6.h>
18 #include <net/genetlink.h>
19 
20 #include "internal.h"
21 #include "ncsi-pkt.h"
22 #include "ncsi-netlink.h"
23 
24 LIST_HEAD(ncsi_dev_list);
25 DEFINE_SPINLOCK(ncsi_dev_lock);
26 
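/* A channel has link when bit 0 (Link Flag) of its cached Get Link
 * Status word is set.
 */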
27 bool ncsi_channel_has_link(struct ncsi_channel *channel)
28 {
29 	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
30 }
31 
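/* Return true if no other channel on this device is active with link,
 * i.e. @channel is the last one carrying traffic.
 */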
32 bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
33 			  struct ncsi_channel *channel)
34 {
35 	struct ncsi_package *np;
36 	struct ncsi_channel *nc;
37 
38 	NCSI_FOR_EACH_PACKAGE(ndp, np)
39 		NCSI_FOR_EACH_CHANNEL(np, nc) {
40 			if (nc == channel)
41 				continue;
42 			if (nc->state == NCSI_CHANNEL_ACTIVE &&
43 			    ncsi_channel_has_link(nc))
44 				return false;
45 		}
46 
47 	return true;
48 }
49 
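/* Report link state through the registered handler: link is up if any
 * active, unqueued channel has link, unless @force_down is set.
 */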
50 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
51 {
52 	struct ncsi_dev *nd = &ndp->ndev;
53 	struct ncsi_package *np;
54 	struct ncsi_channel *nc;
55 	unsigned long flags;
56 
57 	nd->state = ncsi_dev_state_functional;
58 	if (force_down) {
59 		nd->link_up = 0;
60 		goto report;
61 	}
62 
63 	nd->link_up = 0;
64 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
65 		NCSI_FOR_EACH_CHANNEL(np, nc) {
66 			spin_lock_irqsave(&nc->lock, flags);
67 
68 			if (!list_empty(&nc->link) ||
69 			    nc->state != NCSI_CHANNEL_ACTIVE) {
70 				spin_unlock_irqrestore(&nc->lock, flags);
71 				continue;
72 			}
73 
74 			if (ncsi_channel_has_link(nc)) {
75 				spin_unlock_irqrestore(&nc->lock, flags);
76 				nd->link_up = 1;
77 				goto report;
78 			}
79 
80 			spin_unlock_irqrestore(&nc->lock, flags);
81 		}
82 	}
83 
84 report:
85 	nd->handler(nd);
86 }
87 
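/* Channel monitor timer: in the START/RETRY states a Get Link Status
 * command is sent once per second; the WAIT states simply idle. Once the
 * state advances past WAIT_MAX the channel is treated as timed out:
 * link is reported down and the channel is requeued for reconfiguration.
 */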
88 static void ncsi_channel_monitor(struct timer_list *t)
89 {
90 	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
91 	struct ncsi_package *np = nc->package;
92 	struct ncsi_dev_priv *ndp = np->ndp;
93 	struct ncsi_channel_mode *ncm;
94 	struct ncsi_cmd_arg nca;
95 	bool enabled, chained;
96 	unsigned int monitor_state;
97 	unsigned long flags;
98 	int state, ret;
99 
100 	spin_lock_irqsave(&nc->lock, flags);
101 	state = nc->state;
102 	chained = !list_empty(&nc->link);
103 	enabled = nc->monitor.enabled;
104 	monitor_state = nc->monitor.state;
105 	spin_unlock_irqrestore(&nc->lock, flags);
106 
107 	if (!enabled || chained) {
108 		ncsi_stop_channel_monitor(nc);
109 		return;
110 	}
111 	if (state != NCSI_CHANNEL_INACTIVE &&
112 	    state != NCSI_CHANNEL_ACTIVE) {
113 		ncsi_stop_channel_monitor(nc);
114 		return;
115 	}
116 
117 	switch (monitor_state) {
118 	case NCSI_CHANNEL_MONITOR_START:
119 	case NCSI_CHANNEL_MONITOR_RETRY:
120 		nca.ndp = ndp;
121 		nca.package = np->id;
122 		nca.channel = nc->id;
123 		nca.type = NCSI_PKT_CMD_GLS;
124 		nca.req_flags = 0;
125 		ret = ncsi_xmit_cmd(&nca);
126 		if (ret)
127 			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
128 				   ret);
129 		break;
130 	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
131 		break;
132 	default:
133 		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
134 			   nc->id);
135 		ncsi_report_link(ndp, true);
136 		ndp->flags |= NCSI_DEV_RESHUFFLE;
137 
138 		ncsi_stop_channel_monitor(nc);
139 
140 		ncm = &nc->modes[NCSI_MODE_LINK];
141 		spin_lock_irqsave(&nc->lock, flags);
142 		nc->state = NCSI_CHANNEL_INVISIBLE;
143 		ncm->data[2] &= ~0x1;
144 		spin_unlock_irqrestore(&nc->lock, flags);
145 
146 		spin_lock_irqsave(&ndp->lock, flags);
147 		nc->state = NCSI_CHANNEL_ACTIVE;
148 		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
149 		spin_unlock_irqrestore(&ndp->lock, flags);
150 		ncsi_process_next_channel(ndp);
151 		return;
152 	}
153 
154 	spin_lock_irqsave(&nc->lock, flags);
155 	nc->monitor.state++;
156 	spin_unlock_irqrestore(&nc->lock, flags);
157 	mod_timer(&nc->monitor.timer, jiffies + HZ);
158 }
159 
160 void ncsi_start_channel_monitor(struct ncsi_channel *nc)
161 {
162 	unsigned long flags;
163 
164 	spin_lock_irqsave(&nc->lock, flags);
165 	WARN_ON_ONCE(nc->monitor.enabled);
166 	nc->monitor.enabled = true;
167 	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
168 	spin_unlock_irqrestore(&nc->lock, flags);
169 
170 	mod_timer(&nc->monitor.timer, jiffies + HZ);
171 }
172 
173 void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
174 {
175 	unsigned long flags;
176 
177 	spin_lock_irqsave(&nc->lock, flags);
178 	if (!nc->monitor.enabled) {
179 		spin_unlock_irqrestore(&nc->lock, flags);
180 		return;
181 	}
182 	nc->monitor.enabled = false;
183 	spin_unlock_irqrestore(&nc->lock, flags);
184 
185 	del_timer_sync(&nc->monitor.timer);
186 }
187 
188 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
189 				       unsigned char id)
190 {
191 	struct ncsi_channel *nc;
192 
193 	NCSI_FOR_EACH_CHANNEL(np, nc) {
194 		if (nc->id == id)
195 			return nc;
196 	}
197 
198 	return NULL;
199 }
200 
201 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
202 {
203 	struct ncsi_channel *nc, *tmp;
204 	int index;
205 	unsigned long flags;
206 
207 	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
208 	if (!nc)
209 		return NULL;
210 
211 	nc->id = id;
212 	nc->package = np;
213 	nc->state = NCSI_CHANNEL_INACTIVE;
214 	nc->monitor.enabled = false;
215 	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
216 	spin_lock_init(&nc->lock);
217 	INIT_LIST_HEAD(&nc->link);
218 	for (index = 0; index < NCSI_CAP_MAX; index++)
219 		nc->caps[index].index = index;
220 	for (index = 0; index < NCSI_MODE_MAX; index++)
221 		nc->modes[index].index = index;
222 
223 	spin_lock_irqsave(&np->lock, flags);
224 	tmp = ncsi_find_channel(np, id);
225 	if (tmp) {
226 		spin_unlock_irqrestore(&np->lock, flags);
227 		kfree(nc);
228 		return tmp;
229 	}
230 
231 	list_add_tail_rcu(&nc->node, &np->channels);
232 	np->channel_num++;
233 	spin_unlock_irqrestore(&np->lock, flags);
234 
235 	return nc;
236 }
237 
238 static void ncsi_remove_channel(struct ncsi_channel *nc)
239 {
240 	struct ncsi_package *np = nc->package;
241 	unsigned long flags;
242 
243 	spin_lock_irqsave(&nc->lock, flags);
244 
245 	/* Release filters */
246 	kfree(nc->mac_filter.addrs);
247 	kfree(nc->vlan_filter.vids);
248 
249 	nc->state = NCSI_CHANNEL_INACTIVE;
250 	spin_unlock_irqrestore(&nc->lock, flags);
251 	ncsi_stop_channel_monitor(nc);
252 
253 	/* Remove and free channel */
254 	spin_lock_irqsave(&np->lock, flags);
255 	list_del_rcu(&nc->node);
256 	np->channel_num--;
257 	spin_unlock_irqrestore(&np->lock, flags);
258 
259 	kfree(nc);
260 }
261 
262 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
263 				       unsigned char id)
264 {
265 	struct ncsi_package *np;
266 
267 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
268 		if (np->id == id)
269 			return np;
270 	}
271 
272 	return NULL;
273 }
274 
275 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
276 				      unsigned char id)
277 {
278 	struct ncsi_package *np, *tmp;
279 	unsigned long flags;
280 
281 	np = kzalloc(sizeof(*np), GFP_ATOMIC);
282 	if (!np)
283 		return NULL;
284 
285 	np->id = id;
286 	np->ndp = ndp;
287 	spin_lock_init(&np->lock);
288 	INIT_LIST_HEAD(&np->channels);
289 	np->channel_whitelist = UINT_MAX;
290 
291 	spin_lock_irqsave(&ndp->lock, flags);
292 	tmp = ncsi_find_package(ndp, id);
293 	if (tmp) {
294 		spin_unlock_irqrestore(&ndp->lock, flags);
295 		kfree(np);
296 		return tmp;
297 	}
298 
299 	list_add_tail_rcu(&np->node, &ndp->packages);
300 	ndp->package_num++;
301 	spin_unlock_irqrestore(&ndp->lock, flags);
302 
303 	return np;
304 }
305 
306 void ncsi_remove_package(struct ncsi_package *np)
307 {
308 	struct ncsi_dev_priv *ndp = np->ndp;
309 	struct ncsi_channel *nc, *tmp;
310 	unsigned long flags;
311 
312 	/* Release all child channels */
313 	list_for_each_entry_safe(nc, tmp, &np->channels, node)
314 		ncsi_remove_channel(nc);
315 
316 	/* Remove and free package */
317 	spin_lock_irqsave(&ndp->lock, flags);
318 	list_del_rcu(&np->node);
319 	ndp->package_num--;
320 	spin_unlock_irqrestore(&ndp->lock, flags);
321 
322 	kfree(np);
323 }
324 
325 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
326 				   unsigned char id,
327 				   struct ncsi_package **np,
328 				   struct ncsi_channel **nc)
329 {
330 	struct ncsi_package *p;
331 	struct ncsi_channel *c;
332 
333 	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
334 	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
335 
336 	if (np)
337 		*np = p;
338 	if (nc)
339 		*nc = c;
340 }
341 
342 /* For two consecutive NCSI commands, the packet IDs shouldn't be the
343  * same. Otherwise, a stale response might be matched to the wrong
344  * request, so the available IDs are allocated in round-robin fashion.
345  */
346 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
347 					unsigned int req_flags)
348 {
349 	struct ncsi_request *nr = NULL;
350 	int i, limit = ARRAY_SIZE(ndp->requests);
351 	unsigned long flags;
352 
353 	/* Check for an available request slot, up to the ceiling */
354 	spin_lock_irqsave(&ndp->lock, flags);
355 	for (i = ndp->request_id; i < limit; i++) {
356 		if (ndp->requests[i].used)
357 			continue;
358 
359 		nr = &ndp->requests[i];
360 		nr->used = true;
361 		nr->flags = req_flags;
362 		ndp->request_id = i + 1;
363 		goto found;
364 	}
365 
366 	/* Fall back to checking from the starting cursor */
367 	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
368 		if (ndp->requests[i].used)
369 			continue;
370 
371 		nr = &ndp->requests[i];
372 		nr->used = true;
373 		nr->flags = req_flags;
374 		ndp->request_id = i + 1;
375 		goto found;
376 	}
377 
378 found:
379 	spin_unlock_irqrestore(&ndp->lock, flags);
380 	return nr;
381 }
382 
383 void ncsi_free_request(struct ncsi_request *nr)
384 {
385 	struct ncsi_dev_priv *ndp = nr->ndp;
386 	struct sk_buff *cmd, *rsp;
387 	unsigned long flags;
388 	bool driven;
389 
390 	if (nr->enabled) {
391 		nr->enabled = false;
392 		del_timer_sync(&nr->timer);
393 	}
394 
395 	spin_lock_irqsave(&ndp->lock, flags);
396 	cmd = nr->cmd;
397 	rsp = nr->rsp;
398 	nr->cmd = NULL;
399 	nr->rsp = NULL;
400 	nr->used = false;
401 	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
402 	spin_unlock_irqrestore(&ndp->lock, flags);
403 
404 	if (driven && cmd && --ndp->pending_req_num == 0)
405 		schedule_work(&ndp->work);
406 
407 	/* Release command and response */
408 	consume_skb(cmd);
409 	consume_skb(rsp);
410 }
411 
412 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
413 {
414 	struct ncsi_dev_priv *ndp;
415 
416 	NCSI_FOR_EACH_DEV(ndp) {
417 		if (ndp->ndev.dev == dev)
418 			return &ndp->ndev;
419 	}
420 
421 	return NULL;
422 }
423 
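/* Timer expiry handler for an outstanding request: if no response has
 * arrived, notify any netlink requester of the timeout and release the
 * request.
 */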
424 static void ncsi_request_timeout(struct timer_list *t)
425 {
426 	struct ncsi_request *nr = from_timer(nr, t, timer);
427 	struct ncsi_dev_priv *ndp = nr->ndp;
428 	struct ncsi_cmd_pkt *cmd;
429 	struct ncsi_package *np;
430 	struct ncsi_channel *nc;
431 	unsigned long flags;
432 
433 	/* If the request already has an associated response,
434 	 * let the response handler release it.
435 	 */
436 	spin_lock_irqsave(&ndp->lock, flags);
437 	nr->enabled = false;
438 	if (nr->rsp || !nr->cmd) {
439 		spin_unlock_irqrestore(&ndp->lock, flags);
440 		return;
441 	}
442 	spin_unlock_irqrestore(&ndp->lock, flags);
443 
444 	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
445 		if (nr->cmd) {
446 			/* Find the package */
447 			cmd = (struct ncsi_cmd_pkt *)
448 			      skb_network_header(nr->cmd);
449 			ncsi_find_package_and_channel(ndp,
450 						      cmd->cmd.common.channel,
451 						      &np, &nc);
452 			ncsi_send_netlink_timeout(nr, np, nc);
453 		}
454 	}
455 
456 	/* Release the request */
457 	ncsi_free_request(nr);
458 }
459 
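/* Suspend state machine for the active channel: select its package (SP),
 * optionally refresh link status (GLS) when a reshuffle is pending, then
 * disable channel network Tx (DCNT), disable the channel (DC) and, if no
 * other channel in the package is active, deselect the package (DP)
 * before handing off to reset or the next queued channel.
 */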
460 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
461 {
462 	struct ncsi_dev *nd = &ndp->ndev;
463 	struct ncsi_package *np;
464 	struct ncsi_channel *nc, *tmp;
465 	struct ncsi_cmd_arg nca;
466 	unsigned long flags;
467 	int ret;
468 
469 	np = ndp->active_package;
470 	nc = ndp->active_channel;
471 	nca.ndp = ndp;
472 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
473 	switch (nd->state) {
474 	case ncsi_dev_state_suspend:
475 		nd->state = ncsi_dev_state_suspend_select;
476 		/* Fall through */
477 	case ncsi_dev_state_suspend_select:
478 		ndp->pending_req_num = 1;
479 
480 		nca.type = NCSI_PKT_CMD_SP;
481 		nca.package = np->id;
482 		nca.channel = NCSI_RESERVED_CHANNEL;
483 		if (ndp->flags & NCSI_DEV_HWA)
484 			nca.bytes[0] = 0;
485 		else
486 			nca.bytes[0] = 1;
487 
488 		/* Retrieve the last link states of the channels in the current
489 		 * package when the active channel needs to fail over to another
490 		 * one, since we will possibly select another channel as the next
491 		 * active one. The channels' link states are the most important
492 		 * factor in that selection, so they need to be accurate.
493 		 * Unfortunately, the link states of inactive channels can't be
494 		 * updated with LSC AENs in time.
495 		 */
496 		if (ndp->flags & NCSI_DEV_RESHUFFLE)
497 			nd->state = ncsi_dev_state_suspend_gls;
498 		else
499 			nd->state = ncsi_dev_state_suspend_dcnt;
500 		ret = ncsi_xmit_cmd(&nca);
501 		if (ret)
502 			goto error;
503 
504 		break;
505 	case ncsi_dev_state_suspend_gls:
506 		ndp->pending_req_num = np->channel_num;
507 
508 		nca.type = NCSI_PKT_CMD_GLS;
509 		nca.package = np->id;
510 
511 		nd->state = ncsi_dev_state_suspend_dcnt;
512 		NCSI_FOR_EACH_CHANNEL(np, nc) {
513 			nca.channel = nc->id;
514 			ret = ncsi_xmit_cmd(&nca);
515 			if (ret)
516 				goto error;
517 		}
518 
519 		break;
520 	case ncsi_dev_state_suspend_dcnt:
521 		ndp->pending_req_num = 1;
522 
523 		nca.type = NCSI_PKT_CMD_DCNT;
524 		nca.package = np->id;
525 		nca.channel = nc->id;
526 
527 		nd->state = ncsi_dev_state_suspend_dc;
528 		ret = ncsi_xmit_cmd(&nca);
529 		if (ret)
530 			goto error;
531 
532 		break;
533 	case ncsi_dev_state_suspend_dc:
534 		ndp->pending_req_num = 1;
535 
536 		nca.type = NCSI_PKT_CMD_DC;
537 		nca.package = np->id;
538 		nca.channel = nc->id;
539 		nca.bytes[0] = 1;
540 
541 		nd->state = ncsi_dev_state_suspend_deselect;
542 		ret = ncsi_xmit_cmd(&nca);
543 		if (ret)
544 			goto error;
545 
546 		NCSI_FOR_EACH_CHANNEL(np, tmp) {
547 			/* If there is another channel active on this package
548 			 * do not deselect the package.
549 			 */
550 			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
551 				nd->state = ncsi_dev_state_suspend_done;
552 				break;
553 			}
554 		}
555 		break;
556 	case ncsi_dev_state_suspend_deselect:
557 		ndp->pending_req_num = 1;
558 
559 		nca.type = NCSI_PKT_CMD_DP;
560 		nca.package = np->id;
561 		nca.channel = NCSI_RESERVED_CHANNEL;
562 
563 		nd->state = ncsi_dev_state_suspend_done;
564 		ret = ncsi_xmit_cmd(&nca);
565 		if (ret)
566 			goto error;
567 
568 		break;
569 	case ncsi_dev_state_suspend_done:
570 		spin_lock_irqsave(&nc->lock, flags);
571 		nc->state = NCSI_CHANNEL_INACTIVE;
572 		spin_unlock_irqrestore(&nc->lock, flags);
573 		if (ndp->flags & NCSI_DEV_RESET)
574 			ncsi_reset_dev(nd);
575 		else
576 			ncsi_process_next_channel(ndp);
577 		break;
578 	default:
579 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
580 			    nd->state);
581 	}
582 
583 	return;
584 error:
585 	nd->state = ncsi_dev_state_functional;
586 }
587 
588 /* Check the VLAN filter bitmap for a set filter, and construct a
589  * "Set VLAN Filter - Disable" packet if found.
590  */
591 static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
592 			 struct ncsi_cmd_arg *nca)
593 {
594 	struct ncsi_channel_vlan_filter *ncf;
595 	unsigned long flags;
596 	void *bitmap;
597 	int index;
598 	u16 vid;
599 
600 	ncf = &nc->vlan_filter;
601 	bitmap = &ncf->bitmap;
602 
603 	spin_lock_irqsave(&nc->lock, flags);
604 	index = find_next_bit(bitmap, ncf->n_vids, 0);
605 	if (index >= ncf->n_vids) {
606 		spin_unlock_irqrestore(&nc->lock, flags);
607 		return -1;
608 	}
609 	vid = ncf->vids[index];
610 
611 	clear_bit(index, bitmap);
612 	ncf->vids[index] = 0;
613 	spin_unlock_irqrestore(&nc->lock, flags);
614 
615 	nca->type = NCSI_PKT_CMD_SVF;
616 	nca->words[1] = vid;
617 	/* HW filter index starts at 1 */
618 	nca->bytes[6] = index + 1;
619 	nca->bytes[7] = 0x00;
620 	return 0;
621 }
622 
623 /* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
624  * packet.
625  */
626 static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
627 		       struct ncsi_cmd_arg *nca)
628 {
629 	struct ncsi_channel_vlan_filter *ncf;
630 	struct vlan_vid *vlan = NULL;
631 	unsigned long flags;
632 	int i, index;
633 	void *bitmap;
634 	u16 vid;
635 
636 	if (list_empty(&ndp->vlan_vids))
637 		return -1;
638 
639 	ncf = &nc->vlan_filter;
640 	bitmap = &ncf->bitmap;
641 
642 	spin_lock_irqsave(&nc->lock, flags);
643 
644 	rcu_read_lock();
645 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
646 		vid = vlan->vid;
647 		for (i = 0; i < ncf->n_vids; i++)
648 			if (ncf->vids[i] == vid) {
649 				vid = 0;
650 				break;
651 			}
652 		if (vid)
653 			break;
654 	}
655 	rcu_read_unlock();
656 
657 	if (!vid) {
658 		/* No new VLAN ID to set */
659 		spin_unlock_irqrestore(&nc->lock, flags);
660 		return -1;
661 	}
662 
663 	index = find_next_zero_bit(bitmap, ncf->n_vids, 0);
664 	if (index < 0 || index >= ncf->n_vids) {
665 		netdev_err(ndp->ndev.dev,
666 			   "Channel %u already has all VLAN filters set\n",
667 			   nc->id);
668 		spin_unlock_irqrestore(&nc->lock, flags);
669 		return -1;
670 	}
671 
672 	ncf->vids[index] = vid;
673 	set_bit(index, bitmap);
674 	spin_unlock_irqrestore(&nc->lock, flags);
675 
676 	nca->type = NCSI_PKT_CMD_SVF;
677 	nca->words[1] = vid;
678 	/* HW filter index starts at 1 */
679 	nca->bytes[6] = index + 1;
680 	nca->bytes[7] = 0x01;
681 
682 	return 0;
683 }
684 
685 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
686 
687 /* NCSI OEM Command APIs */
688 static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
689 {
690 	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
691 	int ret = 0;
692 
693 	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
694 
695 	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
696 	*(unsigned int *)data = ntohl(NCSI_OEM_MFR_BCM_ID);
697 	data[5] = NCSI_OEM_BCM_CMD_GMA;
698 
699 	nca->data = data;
700 
701 	ret = ncsi_xmit_cmd(nca);
702 	if (ret)
703 		netdev_err(nca->ndp->ndev.dev,
704 			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
705 			   nca->type);
706 	return ret;
707 }
708 
709 static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
710 {
711 	union {
712 		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
713 		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
714 	} u;
715 	int ret = 0;
716 
717 	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
718 
719 	memset(&u, 0, sizeof(u));
720 	u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
721 	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
722 	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
723 
724 	nca->data = u.data_u8;
725 
726 	ret = ncsi_xmit_cmd(nca);
727 	if (ret)
728 		netdev_err(nca->ndp->ndev.dev,
729 			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
730 			   nca->type);
731 	return ret;
732 }
733 
734 /* OEM Command handlers initialization */
735 static struct ncsi_oem_gma_handler {
736 	unsigned int	mfr_id;
737 	int		(*handler)(struct ncsi_cmd_arg *nca);
738 } ncsi_oem_gma_handlers[] = {
739 	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
740 	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx }
741 };
742 
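/* Dispatch the OEM "Get MAC Address" command to the handler matching the
 * given manufacturer ID. The command is only issued once per device
 * (tracked by gma_flag).
 */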
743 static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
744 {
745 	struct ncsi_oem_gma_handler *nch = NULL;
746 	int i;
747 
748 	/* This function should only be called once, return if flag set */
749 	if (nca->ndp->gma_flag == 1)
750 		return -1;
751 
752 	/* Find gma handler for given manufacturer id */
753 	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
754 		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
755 			if (ncsi_oem_gma_handlers[i].handler)
756 				nch = &ncsi_oem_gma_handlers[i];
757 			break;
758 		}
759 	}
760 
761 	if (!nch) {
762 		netdev_err(nca->ndp->ndev.dev,
763 			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
764 			   mf_id);
765 		return -1;
766 	}
767 
768 	/* Set the flag; the GMA command should only be sent once */
769 	nca->ndp->gma_flag = 1;
770 
771 	/* Get the MAC address from the NCSI device */
772 	return nch->handler(nca);
773 }
774 
775 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
776 
777 /* Determine if a given channel from the channel_queue should be used for Tx */
778 static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
779 			       struct ncsi_channel *nc)
780 {
781 	struct ncsi_channel_mode *ncm;
782 	struct ncsi_channel *channel;
783 	struct ncsi_package *np;
784 
785 	/* Check if any other channel has Tx enabled; a channel may have already
786 	 * been configured and removed from the channel queue.
787 	 */
788 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
789 		if (!ndp->multi_package && np != nc->package)
790 			continue;
791 		NCSI_FOR_EACH_CHANNEL(np, channel) {
792 			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
793 			if (ncm->enable)
794 				return false;
795 		}
796 	}
797 
798 	/* This channel is the preferred channel and has link */
799 	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
800 		np = channel->package;
801 		if (np->preferred_channel &&
802 		    ncsi_channel_has_link(np->preferred_channel)) {
803 			return np->preferred_channel == nc;
804 		}
805 	}
806 
807 	/* This channel has link */
808 	if (ncsi_channel_has_link(nc))
809 		return true;
810 
811 	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
812 		if (ncsi_channel_has_link(channel))
813 			return false;
814 
815 	/* No other channel has link; default to this one */
816 	return true;
817 }
818 
819 /* Change the active Tx channel in a multi-channel setup */
820 int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
821 			   struct ncsi_package *package,
822 			   struct ncsi_channel *disable,
823 			   struct ncsi_channel *enable)
824 {
825 	struct ncsi_cmd_arg nca;
826 	struct ncsi_channel *nc;
827 	struct ncsi_package *np;
828 	int ret = 0;
829 
830 	if (!package->multi_channel && !ndp->multi_package)
831 		netdev_warn(ndp->ndev.dev,
832 			    "NCSI: Trying to update Tx channel in single-channel mode\n");
833 	nca.ndp = ndp;
834 	nca.req_flags = 0;
835 
836 	/* Find current channel with Tx enabled */
837 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
838 		if (disable)
839 			break;
840 		if (!ndp->multi_package && np != package)
841 			continue;
842 
843 		NCSI_FOR_EACH_CHANNEL(np, nc)
844 			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
845 				disable = nc;
846 				break;
847 			}
848 	}
849 
850 	/* Find a suitable channel for Tx */
851 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
852 		if (enable)
853 			break;
854 		if (!ndp->multi_package && np != package)
855 			continue;
856 		if (!(ndp->package_whitelist & (0x1 << np->id)))
857 			continue;
858 
859 		if (np->preferred_channel &&
860 		    ncsi_channel_has_link(np->preferred_channel)) {
861 			enable = np->preferred_channel;
862 			break;
863 		}
864 
865 		NCSI_FOR_EACH_CHANNEL(np, nc) {
866 			if (!(np->channel_whitelist & (0x1 << nc->id)))
867 				continue;
868 			if (nc->state != NCSI_CHANNEL_ACTIVE)
869 				continue;
870 			if (ncsi_channel_has_link(nc)) {
871 				enable = nc;
872 				break;
873 			}
874 		}
875 	}
876 
877 	if (disable == enable)
878 		return -1;
879 
880 	if (!enable)
881 		return -1;
882 
883 	if (disable) {
884 		nca.channel = disable->id;
885 		nca.package = disable->package->id;
886 		nca.type = NCSI_PKT_CMD_DCNT;
887 		ret = ncsi_xmit_cmd(&nca);
888 		if (ret)
889 			netdev_err(ndp->ndev.dev,
890 				   "Error %d sending DCNT\n",
891 				   ret);
892 	}
893 
894 	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
895 
896 	nca.channel = enable->id;
897 	nca.package = enable->package->id;
898 	nca.type = NCSI_PKT_CMD_ECNT;
899 	ret = ncsi_xmit_cmd(&nca);
900 	if (ret)
901 		netdev_err(ndp->ndev.dev,
902 			   "Error %d sending ECNT\n",
903 			   ret);
904 
905 	return ret;
906 }
907 
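/* Configuration state machine for the active channel: select the package
 * (SP), clear initial state (CIS), optionally fetch the MAC address via
 * an OEM command, program VLAN and MAC filters, enable broadcast (and,
 * with IPv6, global multicast) filtering, enable the channel and its Tx,
 * enable AENs if supported, and finish with a Get Link Status before
 * marking the channel active.
 */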
908 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
909 {
910 	struct ncsi_package *np = ndp->active_package;
911 	struct ncsi_channel *nc = ndp->active_channel;
912 	struct ncsi_channel *hot_nc = NULL;
913 	struct ncsi_dev *nd = &ndp->ndev;
914 	struct net_device *dev = nd->dev;
915 	struct ncsi_cmd_arg nca;
916 	unsigned char index;
917 	unsigned long flags;
918 	int ret;
919 
920 	nca.ndp = ndp;
921 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
922 	switch (nd->state) {
923 	case ncsi_dev_state_config:
924 	case ncsi_dev_state_config_sp:
925 		ndp->pending_req_num = 1;
926 
927 		/* Select the specific package */
928 		nca.type = NCSI_PKT_CMD_SP;
929 		if (ndp->flags & NCSI_DEV_HWA)
930 			nca.bytes[0] = 0;
931 		else
932 			nca.bytes[0] = 1;
933 		nca.package = np->id;
934 		nca.channel = NCSI_RESERVED_CHANNEL;
935 		ret = ncsi_xmit_cmd(&nca);
936 		if (ret) {
937 			netdev_err(ndp->ndev.dev,
938 				   "NCSI: Failed to transmit CMD_SP\n");
939 			goto error;
940 		}
941 
942 		nd->state = ncsi_dev_state_config_cis;
943 		break;
944 	case ncsi_dev_state_config_cis:
945 		ndp->pending_req_num = 1;
946 
947 		/* Clear initial state */
948 		nca.type = NCSI_PKT_CMD_CIS;
949 		nca.package = np->id;
950 		nca.channel = nc->id;
951 		ret = ncsi_xmit_cmd(&nca);
952 		if (ret) {
953 			netdev_err(ndp->ndev.dev,
954 				   "NCSI: Failed to transmit CMD_CIS\n");
955 			goto error;
956 		}
957 
958 		nd->state = ncsi_dev_state_config_oem_gma;
959 		break;
960 	case ncsi_dev_state_config_oem_gma:
961 		nd->state = ncsi_dev_state_config_clear_vids;
962 		ret = -1;
963 
964 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
965 		nca.type = NCSI_PKT_CMD_OEM;
966 		nca.package = np->id;
967 		nca.channel = nc->id;
968 		ndp->pending_req_num = 1;
969 		ret = ncsi_gma_handler(&nca, nc->version.mf_id);
970 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
971 
972 		if (ret < 0)
973 			schedule_work(&ndp->work);
974 
975 		break;
976 	case ncsi_dev_state_config_clear_vids:
977 	case ncsi_dev_state_config_svf:
978 	case ncsi_dev_state_config_ev:
979 	case ncsi_dev_state_config_sma:
980 	case ncsi_dev_state_config_ebf:
981 #if IS_ENABLED(CONFIG_IPV6)
982 	case ncsi_dev_state_config_egmf:
983 #endif
984 	case ncsi_dev_state_config_ecnt:
985 	case ncsi_dev_state_config_ec:
986 	case ncsi_dev_state_config_ae:
987 	case ncsi_dev_state_config_gls:
988 		ndp->pending_req_num = 1;
989 
990 		nca.package = np->id;
991 		nca.channel = nc->id;
992 
993 		/* Clear any active filters on the channel before setting */
994 		if (nd->state == ncsi_dev_state_config_clear_vids) {
995 			ret = clear_one_vid(ndp, nc, &nca);
996 			if (ret) {
997 				nd->state = ncsi_dev_state_config_svf;
998 				schedule_work(&ndp->work);
999 				break;
1000 			}
1001 			/* Repeat */
1002 			nd->state = ncsi_dev_state_config_clear_vids;
1003 		/* Add known VLAN tags to the filter */
1004 		} else if (nd->state == ncsi_dev_state_config_svf) {
1005 			ret = set_one_vid(ndp, nc, &nca);
1006 			if (ret) {
1007 				nd->state = ncsi_dev_state_config_ev;
1008 				schedule_work(&ndp->work);
1009 				break;
1010 			}
1011 			/* Repeat */
1012 			nd->state = ncsi_dev_state_config_svf;
1013 		/* Enable/Disable the VLAN filter */
1014 		} else if (nd->state == ncsi_dev_state_config_ev) {
1015 			if (list_empty(&ndp->vlan_vids)) {
1016 				nca.type = NCSI_PKT_CMD_DV;
1017 			} else {
1018 				nca.type = NCSI_PKT_CMD_EV;
1019 				nca.bytes[3] = NCSI_CAP_VLAN_NO;
1020 			}
1021 			nd->state = ncsi_dev_state_config_sma;
1022 		} else if (nd->state == ncsi_dev_state_config_sma) {
1023 			/* Use first entry in unicast filter table. Note that
1024 			 * the MAC filter table starts from entry 1 instead of
1025 			 * 0.
1026 			 */
1027 			nca.type = NCSI_PKT_CMD_SMA;
1028 			for (index = 0; index < 6; index++)
1029 				nca.bytes[index] = dev->dev_addr[index];
1030 			nca.bytes[6] = 0x1;
1031 			nca.bytes[7] = 0x1;
1032 			nd->state = ncsi_dev_state_config_ebf;
1033 		} else if (nd->state == ncsi_dev_state_config_ebf) {
1034 			nca.type = NCSI_PKT_CMD_EBF;
1035 			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
1036 			if (ncsi_channel_is_tx(ndp, nc))
1037 				nd->state = ncsi_dev_state_config_ecnt;
1038 			else
1039 				nd->state = ncsi_dev_state_config_ec;
1040 #if IS_ENABLED(CONFIG_IPV6)
1041 			if (ndp->inet6_addr_num > 0 &&
1042 			    (nc->caps[NCSI_CAP_GENERIC].cap &
1043 			     NCSI_CAP_GENERIC_MC))
1044 				nd->state = ncsi_dev_state_config_egmf;
1045 		} else if (nd->state == ncsi_dev_state_config_egmf) {
1046 			nca.type = NCSI_PKT_CMD_EGMF;
1047 			nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
1048 			if (ncsi_channel_is_tx(ndp, nc))
1049 				nd->state = ncsi_dev_state_config_ecnt;
1050 			else
1051 				nd->state = ncsi_dev_state_config_ec;
1052 #endif /* CONFIG_IPV6 */
1053 		} else if (nd->state == ncsi_dev_state_config_ecnt) {
1054 			if (np->preferred_channel &&
1055 			    nc != np->preferred_channel)
1056 				netdev_info(ndp->ndev.dev,
1057 					    "NCSI: Tx failed over to channel %u\n",
1058 					    nc->id);
1059 			nca.type = NCSI_PKT_CMD_ECNT;
1060 			nd->state = ncsi_dev_state_config_ec;
1061 		} else if (nd->state == ncsi_dev_state_config_ec) {
1062 			/* Enable AEN if it's supported */
1063 			nca.type = NCSI_PKT_CMD_EC;
1064 			nd->state = ncsi_dev_state_config_ae;
1065 			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
1066 				nd->state = ncsi_dev_state_config_gls;
1067 		} else if (nd->state == ncsi_dev_state_config_ae) {
1068 			nca.type = NCSI_PKT_CMD_AE;
1069 			nca.bytes[0] = 0;
1070 			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
1071 			nd->state = ncsi_dev_state_config_gls;
1072 		} else if (nd->state == ncsi_dev_state_config_gls) {
1073 			nca.type = NCSI_PKT_CMD_GLS;
1074 			nd->state = ncsi_dev_state_config_done;
1075 		}
1076 
1077 		ret = ncsi_xmit_cmd(&nca);
1078 		if (ret) {
1079 			netdev_err(ndp->ndev.dev,
1080 				   "NCSI: Failed to transmit CMD %x\n",
1081 				   nca.type);
1082 			goto error;
1083 		}
1084 		break;
1085 	case ncsi_dev_state_config_done:
1086 		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
1087 			   nc->id);
1088 		spin_lock_irqsave(&nc->lock, flags);
1089 		nc->state = NCSI_CHANNEL_ACTIVE;
1090 
1091 		if (ndp->flags & NCSI_DEV_RESET) {
1092 			/* A reset event happened during config, start it now */
1093 			nc->reconfigure_needed = false;
1094 			spin_unlock_irqrestore(&nc->lock, flags);
1095 			ncsi_reset_dev(nd);
1096 			break;
1097 		}
1098 
1099 		if (nc->reconfigure_needed) {
1100 			/* This channel's configuration has been updated
1101 			 * part-way during the config state - start the
1102 			 * channel configuration over
1103 			 */
1104 			nc->reconfigure_needed = false;
1105 			nc->state = NCSI_CHANNEL_INACTIVE;
1106 			spin_unlock_irqrestore(&nc->lock, flags);
1107 
1108 			spin_lock_irqsave(&ndp->lock, flags);
1109 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1110 			spin_unlock_irqrestore(&ndp->lock, flags);
1111 
1112 			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
1113 			ncsi_process_next_channel(ndp);
1114 			break;
1115 		}
1116 
1117 		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
1118 			hot_nc = nc;
1119 		} else {
1120 			hot_nc = NULL;
1121 			netdev_dbg(ndp->ndev.dev,
1122 				   "NCSI: channel %u link down after config\n",
1123 				   nc->id);
1124 		}
1125 		spin_unlock_irqrestore(&nc->lock, flags);
1126 
1127 		/* Update the hot channel */
1128 		spin_lock_irqsave(&ndp->lock, flags);
1129 		ndp->hot_channel = hot_nc;
1130 		spin_unlock_irqrestore(&ndp->lock, flags);
1131 
1132 		ncsi_start_channel_monitor(nc);
1133 		ncsi_process_next_channel(ndp);
1134 		break;
1135 	default:
1136 		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
1137 			     nd->state);
1138 	}
1139 
1140 	return;
1141 
1142 error:
1143 	ncsi_report_link(ndp, true);
1144 }
1145 
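/* Scan whitelisted packages and channels for candidates to configure,
 * preferring the previous hot channel and channels with link up; if no
 * channel has link, fall back to any inactive channel found.
 */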
1146 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
1147 {
1148 	struct ncsi_channel *nc, *found, *hot_nc;
1149 	struct ncsi_channel_mode *ncm;
1150 	unsigned long flags, cflags;
1151 	struct ncsi_package *np;
1152 	bool with_link;
1153 
1154 	spin_lock_irqsave(&ndp->lock, flags);
1155 	hot_nc = ndp->hot_channel;
1156 	spin_unlock_irqrestore(&ndp->lock, flags);
1157 
1158 	/* By default the search stops once an inactive channel with link up
1159 	 * is found, unless a preferred channel is set.
1160 	 * If multi_package or multi_channel is configured, all channels in
1161 	 * the whitelist are added to the channel queue.
1162 	 */
1163 	found = NULL;
1164 	with_link = false;
1165 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1166 		if (!(ndp->package_whitelist & (0x1 << np->id)))
1167 			continue;
1168 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1169 			if (!(np->channel_whitelist & (0x1 << nc->id)))
1170 				continue;
1171 
1172 			spin_lock_irqsave(&nc->lock, cflags);
1173 
1174 			if (!list_empty(&nc->link) ||
1175 			    nc->state != NCSI_CHANNEL_INACTIVE) {
1176 				spin_unlock_irqrestore(&nc->lock, cflags);
1177 				continue;
1178 			}
1179 
1180 			if (!found)
1181 				found = nc;
1182 
1183 			if (nc == hot_nc)
1184 				found = nc;
1185 
1186 			ncm = &nc->modes[NCSI_MODE_LINK];
1187 			if (ncm->data[2] & 0x1) {
1188 				found = nc;
1189 				with_link = true;
1190 			}
1191 
1192 			/* If multi_channel is enabled, configure all valid
1193 			 * channels whether or not they currently have link,
1194 			 * so that they will have AENs enabled.
1195 			 */
1196 			if (with_link || np->multi_channel) {
1197 				spin_lock_irqsave(&ndp->lock, flags);
1198 				list_add_tail_rcu(&nc->link,
1199 						  &ndp->channel_queue);
1200 				spin_unlock_irqrestore(&ndp->lock, flags);
1201 
1202 				netdev_dbg(ndp->ndev.dev,
1203 					   "NCSI: Channel %u added to queue (link %s)\n",
1204 					   nc->id,
1205 					   ncm->data[2] & 0x1 ? "up" : "down");
1206 			}
1207 
1208 			spin_unlock_irqrestore(&nc->lock, cflags);
1209 
1210 			if (with_link && !np->multi_channel)
1211 				break;
1212 		}
1213 		if (with_link && !ndp->multi_package)
1214 			break;
1215 	}
1216 
1217 	if (list_empty(&ndp->channel_queue) && found) {
1218 		netdev_info(ndp->ndev.dev,
1219 			    "NCSI: No channel with link found, configuring channel %u\n",
1220 			    found->id);
1221 		spin_lock_irqsave(&ndp->lock, flags);
1222 		list_add_tail_rcu(&found->link, &ndp->channel_queue);
1223 		spin_unlock_irqrestore(&ndp->lock, flags);
1224 	} else if (!found) {
1225 		netdev_warn(ndp->ndev.dev,
1226 			    "NCSI: No channel found to configure!\n");
1227 		ncsi_report_link(ndp, true);
1228 		return -ENODEV;
1229 	}
1230 
1231 	return ncsi_process_next_channel(ndp);
1232 }
1233 
1234 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1235 {
1236 	struct ncsi_package *np;
1237 	struct ncsi_channel *nc;
1238 	unsigned int cap;
1239 	bool has_channel = false;
1240 
1241 	/* Hardware arbitration is disabled if any one channel
1242 	 * doesn't explicitly support it.
1243 	 */
1244 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1245 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1246 			has_channel = true;
1247 
1248 			cap = nc->caps[NCSI_CAP_GENERIC].cap;
1249 			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1250 			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1251 			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
1252 				ndp->flags &= ~NCSI_DEV_HWA;
1253 				return false;
1254 			}
1255 		}
1256 	}
1257 
1258 	if (has_channel) {
1259 		ndp->flags |= NCSI_DEV_HWA;
1260 		return true;
1261 	}
1262 
1263 	ndp->flags &= ~NCSI_DEV_HWA;
1264 	return false;
1265 }
1266 
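/* Probe state machine: deselect all packages, then select each package
 * ID in turn, clear the channels' initial state and query version,
 * capabilities and link status for every channel before deselecting the
 * package and moving on. Once all eight package IDs have been probed,
 * hardware arbitration support is evaluated and an active channel is
 * chosen.
 */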
1267 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1268 {
1269 	struct ncsi_dev *nd = &ndp->ndev;
1270 	struct ncsi_package *np;
1271 	struct ncsi_channel *nc;
1272 	struct ncsi_cmd_arg nca;
1273 	unsigned char index;
1274 	int ret;
1275 
1276 	nca.ndp = ndp;
1277 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1278 	switch (nd->state) {
1279 	case ncsi_dev_state_probe:
1280 		nd->state = ncsi_dev_state_probe_deselect;
1281 		/* Fall through */
1282 	case ncsi_dev_state_probe_deselect:
1283 		ndp->pending_req_num = 8;
1284 
1285 		/* Deselect all possible packages */
1286 		nca.type = NCSI_PKT_CMD_DP;
1287 		nca.channel = NCSI_RESERVED_CHANNEL;
1288 		for (index = 0; index < 8; index++) {
1289 			nca.package = index;
1290 			ret = ncsi_xmit_cmd(&nca);
1291 			if (ret)
1292 				goto error;
1293 		}
1294 
1295 		nd->state = ncsi_dev_state_probe_package;
1296 		break;
1297 	case ncsi_dev_state_probe_package:
1298 		ndp->pending_req_num = 1;
1299 
1300 		nca.type = NCSI_PKT_CMD_SP;
1301 		nca.bytes[0] = 1;
1302 		nca.package = ndp->package_probe_id;
1303 		nca.channel = NCSI_RESERVED_CHANNEL;
1304 		ret = ncsi_xmit_cmd(&nca);
1305 		if (ret)
1306 			goto error;
1307 		nd->state = ncsi_dev_state_probe_channel;
1308 		break;
1309 	case ncsi_dev_state_probe_channel:
1310 		ndp->active_package = ncsi_find_package(ndp,
1311 							ndp->package_probe_id);
1312 		if (!ndp->active_package) {
1313 			/* No response */
1314 			nd->state = ncsi_dev_state_probe_dp;
1315 			schedule_work(&ndp->work);
1316 			break;
1317 		}
1318 		nd->state = ncsi_dev_state_probe_cis;
1319 		schedule_work(&ndp->work);
1320 		break;
1321 	case ncsi_dev_state_probe_cis:
1322 		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1323 
1324 		/* Clear initial state */
1325 		nca.type = NCSI_PKT_CMD_CIS;
1326 		nca.package = ndp->active_package->id;
1327 		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1328 			nca.channel = index;
1329 			ret = ncsi_xmit_cmd(&nca);
1330 			if (ret)
1331 				goto error;
1332 		}
1333 
1334 		nd->state = ncsi_dev_state_probe_gvi;
1335 		break;
1336 	case ncsi_dev_state_probe_gvi:
1337 	case ncsi_dev_state_probe_gc:
1338 	case ncsi_dev_state_probe_gls:
1339 		np = ndp->active_package;
1340 		ndp->pending_req_num = np->channel_num;
1341 
1342 		/* Retrieve version, capability or link status */
1343 		if (nd->state == ncsi_dev_state_probe_gvi)
1344 			nca.type = NCSI_PKT_CMD_GVI;
1345 		else if (nd->state == ncsi_dev_state_probe_gc)
1346 			nca.type = NCSI_PKT_CMD_GC;
1347 		else
1348 			nca.type = NCSI_PKT_CMD_GLS;
1349 
1350 		nca.package = np->id;
1351 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1352 			nca.channel = nc->id;
1353 			ret = ncsi_xmit_cmd(&nca);
1354 			if (ret)
1355 				goto error;
1356 		}
1357 
1358 		if (nd->state == ncsi_dev_state_probe_gvi)
1359 			nd->state = ncsi_dev_state_probe_gc;
1360 		else if (nd->state == ncsi_dev_state_probe_gc)
1361 			nd->state = ncsi_dev_state_probe_gls;
1362 		else
1363 			nd->state = ncsi_dev_state_probe_dp;
1364 		break;
1365 	case ncsi_dev_state_probe_dp:
1366 		ndp->pending_req_num = 1;
1367 
1368 		/* Deselect the current package */
1369 		nca.type = NCSI_PKT_CMD_DP;
1370 		nca.package = ndp->package_probe_id;
1371 		nca.channel = NCSI_RESERVED_CHANNEL;
1372 		ret = ncsi_xmit_cmd(&nca);
1373 		if (ret)
1374 			goto error;
1375 
1376 		/* Probe next package */
1377 		ndp->package_probe_id++;
1378 		if (ndp->package_probe_id >= 8) {
1379 			/* Probe finished */
1380 			ndp->flags |= NCSI_DEV_PROBED;
1381 			break;
1382 		}
1383 		nd->state = ncsi_dev_state_probe_package;
1384 		ndp->active_package = NULL;
1385 		break;
1386 	default:
1387 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in enumeration\n",
1388 			    nd->state);
1389 	}
1390 
1391 	if (ndp->flags & NCSI_DEV_PROBED) {
1392 		/* Check if all packages have HWA support */
1393 		ncsi_check_hwa(ndp);
1394 		ncsi_choose_active_channel(ndp);
1395 	}
1396 
1397 	return;
1398 error:
1399 	netdev_err(ndp->ndev.dev,
1400 		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
1401 		   nca.type);
1402 	ncsi_report_link(ndp, true);
1403 }
1404 
1405 static void ncsi_dev_work(struct work_struct *work)
1406 {
1407 	struct ncsi_dev_priv *ndp = container_of(work,
1408 			struct ncsi_dev_priv, work);
1409 	struct ncsi_dev *nd = &ndp->ndev;
1410 
1411 	switch (nd->state & ncsi_dev_state_major) {
1412 	case ncsi_dev_state_probe:
1413 		ncsi_probe_channel(ndp);
1414 		break;
1415 	case ncsi_dev_state_suspend:
1416 		ncsi_suspend_channel(ndp);
1417 		break;
1418 	case ncsi_dev_state_config:
1419 		ncsi_configure_channel(ndp);
1420 		break;
1421 	default:
1422 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1423 			    nd->state);
1424 	}
1425 }
1426 
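/* Take the next channel off the channel queue and either configure it
 * (if it was inactive) or suspend it (if it was active). With an empty
 * queue, reshuffle if requested; otherwise report the final link state.
 */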
1427 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1428 {
1429 	struct ncsi_channel *nc;
1430 	int old_state;
1431 	unsigned long flags;
1432 
1433 	spin_lock_irqsave(&ndp->lock, flags);
1434 	nc = list_first_or_null_rcu(&ndp->channel_queue,
1435 				    struct ncsi_channel, link);
1436 	if (!nc) {
1437 		spin_unlock_irqrestore(&ndp->lock, flags);
1438 		goto out;
1439 	}
1440 
1441 	list_del_init(&nc->link);
1442 	spin_unlock_irqrestore(&ndp->lock, flags);
1443 
1444 	spin_lock_irqsave(&nc->lock, flags);
1445 	old_state = nc->state;
1446 	nc->state = NCSI_CHANNEL_INVISIBLE;
1447 	spin_unlock_irqrestore(&nc->lock, flags);
1448 
1449 	ndp->active_channel = nc;
1450 	ndp->active_package = nc->package;
1451 
1452 	switch (old_state) {
1453 	case NCSI_CHANNEL_INACTIVE:
1454 		ndp->ndev.state = ncsi_dev_state_config;
1455 		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
1456 			   nc->id);
1457 		ncsi_configure_channel(ndp);
1458 		break;
1459 	case NCSI_CHANNEL_ACTIVE:
1460 		ndp->ndev.state = ncsi_dev_state_suspend;
1461 		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1462 			   nc->id);
1463 		ncsi_suspend_channel(ndp);
1464 		break;
1465 	default:
1466 		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1467 			   old_state, nc->package->id, nc->id);
1468 		ncsi_report_link(ndp, false);
1469 		return -EINVAL;
1470 	}
1471 
1472 	return 0;
1473 
1474 out:
1475 	ndp->active_channel = NULL;
1476 	ndp->active_package = NULL;
1477 	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1478 		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1479 		return ncsi_choose_active_channel(ndp);
1480 	}
1481 
1482 	ncsi_report_link(ndp, false);
1483 	return -ENODEV;
1484 }
1485 
1486 #if IS_ENABLED(CONFIG_IPV6)
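/* Track IPv6 address additions and removals on the netdev and enable or
 * disable the global multicast filter on the active channel accordingly.
 */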
1487 static int ncsi_inet6addr_event(struct notifier_block *this,
1488 				unsigned long event, void *data)
1489 {
1490 	struct inet6_ifaddr *ifa = data;
1491 	struct net_device *dev = ifa->idev->dev;
1492 	struct ncsi_dev *nd = ncsi_find_dev(dev);
1493 	struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
1494 	struct ncsi_package *np;
1495 	struct ncsi_channel *nc;
1496 	struct ncsi_cmd_arg nca;
1497 	bool action;
1498 	int ret;
1499 
1500 	if (!ndp || (ipv6_addr_type(&ifa->addr) &
1501 	    (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
1502 		return NOTIFY_OK;
1503 
1504 	switch (event) {
1505 	case NETDEV_UP:
1506 		action = (++ndp->inet6_addr_num) == 1;
1507 		nca.type = NCSI_PKT_CMD_EGMF;
1508 		break;
1509 	case NETDEV_DOWN:
1510 		action = (--ndp->inet6_addr_num == 0);
1511 		nca.type = NCSI_PKT_CMD_DGMF;
1512 		break;
1513 	default:
1514 		return NOTIFY_OK;
1515 	}
1516 
1517 	/* We might not have an active channel or package yet. The required
1518 	 * IPv6 multicast filter will be enabled when an active channel or
1519 	 * package is chosen.
1520 	 */
1521 	np = ndp->active_package;
1522 	nc = ndp->active_channel;
1523 	if (!action || !np || !nc)
1524 		return NOTIFY_OK;
1525 
1526 	/* We needn't enable or disable it if the function isn't supported */
1527 	if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
1528 		return NOTIFY_OK;
1529 
1530 	nca.ndp = ndp;
1531 	nca.req_flags = 0;
1532 	nca.package = np->id;
1533 	nca.channel = nc->id;
1534 	nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
1535 	ret = ncsi_xmit_cmd(&nca);
1536 	if (ret) {
1537 		netdev_warn(dev, "Failed to %s global multicast filter (%d)\n",
1538 			    (event == NETDEV_UP) ? "enable" : "disable", ret);
1539 		return NOTIFY_DONE;
1540 	}
1541 
1542 	return NOTIFY_OK;
1543 }
1544 
1545 static struct notifier_block ncsi_inet6addr_notifier = {
1546 	.notifier_call = ncsi_inet6addr_event,
1547 };
1548 #endif /* CONFIG_IPV6 */
1549 
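/* Requeue every active channel for reconfiguration, e.g. after a VLAN
 * filter change. Busy channels are only marked as needing reconfiguration.
 * Returns the number of channels actually kicked.
 */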
1550 static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1551 {
1552 	struct ncsi_dev *nd = &ndp->ndev;
1553 	struct ncsi_channel *nc;
1554 	struct ncsi_package *np;
1555 	unsigned long flags;
1556 	unsigned int n = 0;
1557 
1558 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1559 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1560 			spin_lock_irqsave(&nc->lock, flags);
1561 
1562 			/* Channels may be busy; mark dirty instead of
1563 			 * kicking if:
1564 			 * a) not ACTIVE (configured)
1565 			 * b) in the channel_queue (to be configured)
1566 			 * c) its ndev is in the config state
1567 			 */
1568 			if (nc->state != NCSI_CHANNEL_ACTIVE) {
1569 				if ((ndp->ndev.state & 0xff00) ==
1570 						ncsi_dev_state_config ||
1571 						!list_empty(&nc->link)) {
1572 					netdev_dbg(nd->dev,
1573 						   "NCSI: channel %p marked dirty\n",
1574 						   nc);
1575 					nc->reconfigure_needed = true;
1576 				}
1577 				spin_unlock_irqrestore(&nc->lock, flags);
1578 				continue;
1579 			}
1580 
1581 			spin_unlock_irqrestore(&nc->lock, flags);
1582 
1583 			ncsi_stop_channel_monitor(nc);
1584 			spin_lock_irqsave(&nc->lock, flags);
1585 			nc->state = NCSI_CHANNEL_INACTIVE;
1586 			spin_unlock_irqrestore(&nc->lock, flags);
1587 
1588 			spin_lock_irqsave(&ndp->lock, flags);
1589 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1590 			spin_unlock_irqrestore(&ndp->lock, flags);
1591 
1592 			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
1593 			n++;
1594 		}
1595 	}
1596 
1597 	return n;
1598 }
1599 
1600 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1601 {
1602 	struct ncsi_dev_priv *ndp;
1603 	unsigned int n_vids = 0;
1604 	struct vlan_vid *vlan;
1605 	struct ncsi_dev *nd;
1606 	bool found = false;
1607 
1608 	if (vid == 0)
1609 		return 0;
1610 
1611 	nd = ncsi_find_dev(dev);
1612 	if (!nd) {
1613 		netdev_warn(dev, "NCSI: No net_device?\n");
1614 		return 0;
1615 	}
1616 
1617 	ndp = TO_NCSI_DEV_PRIV(nd);
1618 
1619 	/* Add the VLAN id to our internal list */
1620 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1621 		n_vids++;
1622 		if (vlan->vid == vid) {
1623 			netdev_dbg(dev, "NCSI: vid %u already registered\n",
1624 				   vid);
1625 			return 0;
1626 		}
1627 	}
1628 	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1629 		netdev_warn(dev,
1630 			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
1631 			    vid, NCSI_MAX_VLAN_VIDS);
1632 		return -ENOSPC;
1633 	}
1634 
1635 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1636 	if (!vlan)
1637 		return -ENOMEM;
1638 
1639 	vlan->proto = proto;
1640 	vlan->vid = vid;
1641 	list_add_rcu(&vlan->list, &ndp->vlan_vids);
1642 
1643 	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
1644 
1645 	found = ncsi_kick_channels(ndp) != 0;
1646 
1647 	return found ? ncsi_process_next_channel(ndp) : 0;
1648 }
1649 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1650 
1651 int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1652 {
1653 	struct vlan_vid *vlan, *tmp;
1654 	struct ncsi_dev_priv *ndp;
1655 	struct ncsi_dev *nd;
1656 	bool found = false;
1657 
1658 	if (vid == 0)
1659 		return 0;
1660 
1661 	nd = ncsi_find_dev(dev);
1662 	if (!nd) {
1663 		netdev_warn(dev, "NCSI: no net_device?\n");
1664 		return 0;
1665 	}
1666 
1667 	ndp = TO_NCSI_DEV_PRIV(nd);
1668 
1669 	/* Remove the VLAN id from our internal list */
1670 	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1671 		if (vlan->vid == vid) {
1672 			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
1673 			list_del_rcu(&vlan->list);
1674 			found = true;
1675 			kfree(vlan);
1676 		}
1677 
1678 	if (!found) {
1679 		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
1680 		return -EINVAL;
1681 	}
1682 
1683 	found = ncsi_kick_channels(ndp) != 0;
1684 
1685 	return found ? ncsi_process_next_channel(ndp) : 0;
1686 }
1687 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
1688 
1689 struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1690 				   void (*handler)(struct ncsi_dev *ndev))
1691 {
1692 	struct ncsi_dev_priv *ndp;
1693 	struct ncsi_dev *nd;
1694 	unsigned long flags;
1695 	int i;
1696 
1697 	/* Check if the device has been registered or not */
1698 	nd = ncsi_find_dev(dev);
1699 	if (nd)
1700 		return nd;
1701 
1702 	/* Create NCSI device */
1703 	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1704 	if (!ndp)
1705 		return NULL;
1706 
1707 	nd = &ndp->ndev;
1708 	nd->state = ncsi_dev_state_registered;
1709 	nd->dev = dev;
1710 	nd->handler = handler;
1711 	ndp->pending_req_num = 0;
1712 	INIT_LIST_HEAD(&ndp->channel_queue);
1713 	INIT_LIST_HEAD(&ndp->vlan_vids);
1714 	INIT_WORK(&ndp->work, ncsi_dev_work);
1715 	ndp->package_whitelist = UINT_MAX;
1716 
1717 	/* Initialize private NCSI device */
1718 	spin_lock_init(&ndp->lock);
1719 	INIT_LIST_HEAD(&ndp->packages);
1720 	ndp->request_id = NCSI_REQ_START_IDX;
1721 	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1722 		ndp->requests[i].id = i;
1723 		ndp->requests[i].ndp = ndp;
1724 		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
1725 	}
1726 
1727 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1728 #if IS_ENABLED(CONFIG_IPV6)
1729 	ndp->inet6_addr_num = 0;
1730 	if (list_empty(&ncsi_dev_list))
1731 		register_inet6addr_notifier(&ncsi_inet6addr_notifier);
1732 #endif
1733 	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1734 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1735 
1736 	/* Register NCSI packet Rx handler */
1737 	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1738 	ndp->ptype.func = ncsi_rcv_rsp;
1739 	ndp->ptype.dev = dev;
1740 	dev_add_pack(&ndp->ptype);
1741 
1742 	/* Set up generic netlink interface */
1743 	ncsi_init_netlink(dev);
1744 
1745 	return nd;
1746 }
1747 EXPORT_SYMBOL_GPL(ncsi_register_dev);
1748 
1749 int ncsi_start_dev(struct ncsi_dev *nd)
1750 {
1751 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1752 
1753 	if (nd->state != ncsi_dev_state_registered &&
1754 	    nd->state != ncsi_dev_state_functional)
1755 		return -ENOTTY;
1756 
1757 	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1758 		ndp->package_probe_id = 0;
1759 		nd->state = ncsi_dev_state_probe;
1760 		schedule_work(&ndp->work);
1761 		return 0;
1762 	}
1763 
1764 	return ncsi_reset_dev(nd);
1765 }
1766 EXPORT_SYMBOL_GPL(ncsi_start_dev);
1767 
1768 void ncsi_stop_dev(struct ncsi_dev *nd)
1769 {
1770 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1771 	struct ncsi_package *np;
1772 	struct ncsi_channel *nc;
1773 	bool chained;
1774 	int old_state;
1775 	unsigned long flags;
1776 
1777 	/* Stop the channel monitor on any active channels. Don't reset the
1778 	 * channel state so we know which were active when ncsi_start_dev()
1779 	 * is next called.
1780 	 */
1781 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1782 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1783 			ncsi_stop_channel_monitor(nc);
1784 
1785 			spin_lock_irqsave(&nc->lock, flags);
1786 			chained = !list_empty(&nc->link);
1787 			old_state = nc->state;
1788 			spin_unlock_irqrestore(&nc->lock, flags);
1789 
1790 			WARN_ON_ONCE(chained ||
1791 				     old_state == NCSI_CHANNEL_INVISIBLE);
1792 		}
1793 	}
1794 
1795 	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
1796 	ncsi_report_link(ndp, true);
1797 }
1798 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1799 
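/* Reset the NCSI device: flush the channel queue, suspend any channel
 * that is still active, then re-run channel selection from scratch. If a
 * suspend or config operation is in flight, only set NCSI_DEV_RESET and
 * let that operation complete first.
 */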
1800 int ncsi_reset_dev(struct ncsi_dev *nd)
1801 {
1802 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1803 	struct ncsi_channel *nc, *active, *tmp;
1804 	struct ncsi_package *np;
1805 	unsigned long flags;
1806 
1807 	spin_lock_irqsave(&ndp->lock, flags);
1808 
1809 	if (!(ndp->flags & NCSI_DEV_RESET)) {
1810 		/* Haven't been called yet, check states */
1811 		switch (nd->state & ncsi_dev_state_major) {
1812 		case ncsi_dev_state_registered:
1813 		case ncsi_dev_state_probe:
1814 			/* Not even probed yet - do nothing */
1815 			spin_unlock_irqrestore(&ndp->lock, flags);
1816 			return 0;
1817 		case ncsi_dev_state_suspend:
1818 		case ncsi_dev_state_config:
1819 			/* Wait for the channel to finish its suspend/config
1820 			 * operation; once it finishes it will check for
1821 			 * NCSI_DEV_RESET and reset the state.
1822 			 */
1823 			ndp->flags |= NCSI_DEV_RESET;
1824 			spin_unlock_irqrestore(&ndp->lock, flags);
1825 			return 0;
1826 		}
1827 	} else {
1828 		switch (nd->state) {
1829 		case ncsi_dev_state_suspend_done:
1830 		case ncsi_dev_state_config_done:
1831 		case ncsi_dev_state_functional:
1832 			/* Ok */
1833 			break;
1834 		default:
1835 			/* A reset operation is already in progress */
1836 			spin_unlock_irqrestore(&ndp->lock, flags);
1837 			return 0;
1838 		}
1839 	}
1840 
1841 	if (!list_empty(&ndp->channel_queue)) {
1842 		/* Clear any channel queue we may have interrupted */
1843 		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
1844 			list_del_init(&nc->link);
1845 	}
1846 	spin_unlock_irqrestore(&ndp->lock, flags);
1847 
1848 	active = NULL;
1849 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1850 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1851 			spin_lock_irqsave(&nc->lock, flags);
1852 
1853 			if (nc->state == NCSI_CHANNEL_ACTIVE) {
1854 				active = nc;
1855 				nc->state = NCSI_CHANNEL_INVISIBLE;
1856 				spin_unlock_irqrestore(&nc->lock, flags);
1857 				ncsi_stop_channel_monitor(nc);
1858 				break;
1859 			}
1860 
1861 			spin_unlock_irqrestore(&nc->lock, flags);
1862 		}
1863 		if (active)
1864 			break;
1865 	}
1866 
1867 	if (!active) {
1868 		/* Done */
1869 		spin_lock_irqsave(&ndp->lock, flags);
1870 		ndp->flags &= ~NCSI_DEV_RESET;
1871 		spin_unlock_irqrestore(&ndp->lock, flags);
1872 		return ncsi_choose_active_channel(ndp);
1873 	}
1874 
1875 	spin_lock_irqsave(&ndp->lock, flags);
1876 	ndp->flags |= NCSI_DEV_RESET;
1877 	ndp->active_channel = active;
1878 	ndp->active_package = active->package;
1879 	spin_unlock_irqrestore(&ndp->lock, flags);
1880 
1881 	nd->state = ncsi_dev_state_suspend;
1882 	schedule_work(&ndp->work);
1883 	return 0;
1884 }
1885 
1886 void ncsi_unregister_dev(struct ncsi_dev *nd)
1887 {
1888 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1889 	struct ncsi_package *np, *tmp;
1890 	unsigned long flags;
1891 
1892 	dev_remove_pack(&ndp->ptype);
1893 
1894 	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1895 		ncsi_remove_package(np);
1896 
1897 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1898 	list_del_rcu(&ndp->node);
1899 #if IS_ENABLED(CONFIG_IPV6)
1900 	if (list_empty(&ncsi_dev_list))
1901 		unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
1902 #endif
1903 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1904 
1905 	ncsi_unregister_netlink(nd->dev);
1906 
1907 	kfree(ndp);
1908 }
1909 EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
1910