xref: /openbmc/linux/net/ncsi/ncsi-manage.c (revision 5a244f48)
1 /*
2  * Copyright Gavin Shan, IBM Corporation 2016.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 #include <linux/netlink.h>
16 
17 #include <net/ncsi.h>
18 #include <net/net_namespace.h>
19 #include <net/sock.h>
20 #include <net/addrconf.h>
21 #include <net/ipv6.h>
22 #include <net/if_inet6.h>
23 
24 #include "internal.h"
25 #include "ncsi-pkt.h"
26 
27 LIST_HEAD(ncsi_dev_list);
28 DEFINE_SPINLOCK(ncsi_dev_lock);
29 
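/* Per-entry size (in bytes) of each filter table, indexed by table type.
 * Returns a negative error for an unknown table.
 */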
30 static inline int ncsi_filter_size(int table)
31 {
32 	int sizes[] = { 2, 6, 6, 6 };
33 
34 	BUILD_BUG_ON(ARRAY_SIZE(sizes) != NCSI_FILTER_MAX);
35 	if (table < NCSI_FILTER_BASE || table >= NCSI_FILTER_MAX)
36 		return -EINVAL;
37 
38 	return sizes[table];
39 }
40 
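/* Return a pointer to the entry at @index in the given filter table of
 * @nc, or NULL if the table doesn't exist or the table type is invalid.
 */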
41 u32 *ncsi_get_filter(struct ncsi_channel *nc, int table, int index)
42 {
43 	struct ncsi_channel_filter *ncf;
44 	int size;
45 
46 	ncf = nc->filters[table];
47 	if (!ncf)
48 		return NULL;
49 
50 	size = ncsi_filter_size(table);
51 	if (size < 0)
52 		return NULL;
53 
54 	return ncf->data + size * index;
55 }
56 
57 /* Find the first active filter in a filter table that matches the given
58  * data parameter. If data is NULL, this returns the first active filter.
59  */
60 int ncsi_find_filter(struct ncsi_channel *nc, int table, void *data)
61 {
62 	struct ncsi_channel_filter *ncf;
63 	void *bitmap;
64 	int index, size;
65 	unsigned long flags;
66 
67 	ncf = nc->filters[table];
68 	if (!ncf)
69 		return -ENXIO;
70 
71 	size = ncsi_filter_size(table);
72 	if (size < 0)
73 		return size;
74 
75 	spin_lock_irqsave(&nc->lock, flags);
76 	bitmap = (void *)&ncf->bitmap;
77 	index = -1;
78 	while ((index = find_next_bit(bitmap, ncf->total, index + 1))
79 	       < ncf->total) {
80 		if (!data || !memcmp(ncf->data + size * index, data, size)) {
81 			spin_unlock_irqrestore(&nc->lock, flags);
82 			return index;
83 		}
84 	}
85 	spin_unlock_irqrestore(&nc->lock, flags);
86 
87 	return -ENOENT;
88 }
89 
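/* Add an entry to the given filter table, reusing an existing matching
 * entry if there is one. Returns the entry's index or a negative error.
 */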
90 int ncsi_add_filter(struct ncsi_channel *nc, int table, void *data)
91 {
92 	struct ncsi_channel_filter *ncf;
93 	int index, size;
94 	void *bitmap;
95 	unsigned long flags;
96 
97 	size = ncsi_filter_size(table);
98 	if (size < 0)
99 		return size;
100 
101 	index = ncsi_find_filter(nc, table, data);
102 	if (index >= 0)
103 		return index;
104 
105 	ncf = nc->filters[table];
106 	if (!ncf)
107 		return -ENODEV;
108 
109 	spin_lock_irqsave(&nc->lock, flags);
110 	bitmap = (void *)&ncf->bitmap;
111 	do {
112 		index = find_next_zero_bit(bitmap, ncf->total, 0);
113 		if (index >= ncf->total) {
114 			spin_unlock_irqrestore(&nc->lock, flags);
115 			return -ENOSPC;
116 		}
117 	} while (test_and_set_bit(index, bitmap));
118 
119 	memcpy(ncf->data + size * index, data, size);
120 	spin_unlock_irqrestore(&nc->lock, flags);
121 
122 	return index;
123 }
124 
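/* Clear the entry at @index in the given filter table, if it is set. */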
125 int ncsi_remove_filter(struct ncsi_channel *nc, int table, int index)
126 {
127 	struct ncsi_channel_filter *ncf;
128 	int size;
129 	void *bitmap;
130 	unsigned long flags;
131 
132 	size = ncsi_filter_size(table);
133 	if (size < 0)
134 		return size;
135 
136 	ncf = nc->filters[table];
137 	if (!ncf || index >= ncf->total)
138 		return -ENODEV;
139 
140 	spin_lock_irqsave(&nc->lock, flags);
141 	bitmap = (void *)&ncf->bitmap;
142 	if (test_and_clear_bit(index, bitmap))
143 		memset(ncf->data + size * index, 0, size);
144 	spin_unlock_irqrestore(&nc->lock, flags);
145 
146 	return 0;
147 }
148 
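/* Report the link state to the upper layer via the registered handler.
 * Unless @force_down is set, the link is reported as up when any active
 * channel has its link up.
 */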
149 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
150 {
151 	struct ncsi_dev *nd = &ndp->ndev;
152 	struct ncsi_package *np;
153 	struct ncsi_channel *nc;
154 	unsigned long flags;
155 
156 	nd->state = ncsi_dev_state_functional;
157 	if (force_down) {
158 		nd->link_up = 0;
159 		goto report;
160 	}
161 
162 	nd->link_up = 0;
163 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
164 		NCSI_FOR_EACH_CHANNEL(np, nc) {
165 			spin_lock_irqsave(&nc->lock, flags);
166 
167 			if (!list_empty(&nc->link) ||
168 			    nc->state != NCSI_CHANNEL_ACTIVE) {
169 				spin_unlock_irqrestore(&nc->lock, flags);
170 				continue;
171 			}
172 
173 			if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
174 				spin_unlock_irqrestore(&nc->lock, flags);
175 				nd->link_up = 1;
176 				goto report;
177 			}
178 
179 			spin_unlock_irqrestore(&nc->lock, flags);
180 		}
181 	}
182 
183 report:
184 	nd->handler(nd);
185 }
186 
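/* Channel monitor, run from a timer: periodically send a Get Link Status
 * (GLS) command and, if responses stop arriving for too long, tear the
 * channel down and queue it for reconfiguration.
 */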
187 static void ncsi_channel_monitor(unsigned long data)
188 {
189 	struct ncsi_channel *nc = (struct ncsi_channel *)data;
190 	struct ncsi_package *np = nc->package;
191 	struct ncsi_dev_priv *ndp = np->ndp;
192 	struct ncsi_cmd_arg nca;
193 	bool enabled, chained;
194 	unsigned int monitor_state;
195 	unsigned long flags;
196 	int state, ret;
197 
198 	spin_lock_irqsave(&nc->lock, flags);
199 	state = nc->state;
200 	chained = !list_empty(&nc->link);
201 	enabled = nc->monitor.enabled;
202 	monitor_state = nc->monitor.state;
203 	spin_unlock_irqrestore(&nc->lock, flags);
204 
205 	if (!enabled || chained)
206 		return;
207 	if (state != NCSI_CHANNEL_INACTIVE &&
208 	    state != NCSI_CHANNEL_ACTIVE)
209 		return;
210 
211 	switch (monitor_state) {
212 	case NCSI_CHANNEL_MONITOR_START:
213 	case NCSI_CHANNEL_MONITOR_RETRY:
214 		nca.ndp = ndp;
215 		nca.package = np->id;
216 		nca.channel = nc->id;
217 		nca.type = NCSI_PKT_CMD_GLS;
218 		nca.req_flags = 0;
219 		ret = ncsi_xmit_cmd(&nca);
220 		if (ret) {
221 			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
222 				   ret);
223 			return;
224 		}
225 
226 		break;
227 	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
228 		break;
229 	default:
230 		if (!(ndp->flags & NCSI_DEV_HWA) &&
231 		    state == NCSI_CHANNEL_ACTIVE) {
232 			ncsi_report_link(ndp, true);
233 			ndp->flags |= NCSI_DEV_RESHUFFLE;
234 		}
235 
236 		spin_lock_irqsave(&nc->lock, flags);
237 		nc->state = NCSI_CHANNEL_INVISIBLE;
238 		spin_unlock_irqrestore(&nc->lock, flags);
239 
240 		spin_lock_irqsave(&ndp->lock, flags);
241 		nc->state = NCSI_CHANNEL_INACTIVE;
242 		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
243 		spin_unlock_irqrestore(&ndp->lock, flags);
244 		ncsi_process_next_channel(ndp);
245 		return;
246 	}
247 
248 	spin_lock_irqsave(&nc->lock, flags);
249 	nc->monitor.state++;
250 	spin_unlock_irqrestore(&nc->lock, flags);
251 	mod_timer(&nc->monitor.timer, jiffies + HZ);
252 }
253 
254 void ncsi_start_channel_monitor(struct ncsi_channel *nc)
255 {
256 	unsigned long flags;
257 
258 	spin_lock_irqsave(&nc->lock, flags);
259 	WARN_ON_ONCE(nc->monitor.enabled);
260 	nc->monitor.enabled = true;
261 	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
262 	spin_unlock_irqrestore(&nc->lock, flags);
263 
264 	mod_timer(&nc->monitor.timer, jiffies + HZ);
265 }
266 
267 void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
268 {
269 	unsigned long flags;
270 
271 	spin_lock_irqsave(&nc->lock, flags);
272 	if (!nc->monitor.enabled) {
273 		spin_unlock_irqrestore(&nc->lock, flags);
274 		return;
275 	}
276 	nc->monitor.enabled = false;
277 	spin_unlock_irqrestore(&nc->lock, flags);
278 
279 	del_timer_sync(&nc->monitor.timer);
280 }
281 
282 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
283 				       unsigned char id)
284 {
285 	struct ncsi_channel *nc;
286 
287 	NCSI_FOR_EACH_CHANNEL(np, nc) {
288 		if (nc->id == id)
289 			return nc;
290 	}
291 
292 	return NULL;
293 }
294 
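/* Allocate a channel with the given ID and attach it to @np. If the
 * channel already exists, the existing one is returned instead.
 */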
295 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
296 {
297 	struct ncsi_channel *nc, *tmp;
298 	int index;
299 	unsigned long flags;
300 
301 	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
302 	if (!nc)
303 		return NULL;
304 
305 	nc->id = id;
306 	nc->package = np;
307 	nc->state = NCSI_CHANNEL_INACTIVE;
308 	nc->monitor.enabled = false;
309 	setup_timer(&nc->monitor.timer,
310 		    ncsi_channel_monitor, (unsigned long)nc);
311 	spin_lock_init(&nc->lock);
312 	INIT_LIST_HEAD(&nc->link);
313 	for (index = 0; index < NCSI_CAP_MAX; index++)
314 		nc->caps[index].index = index;
315 	for (index = 0; index < NCSI_MODE_MAX; index++)
316 		nc->modes[index].index = index;
317 
318 	spin_lock_irqsave(&np->lock, flags);
319 	tmp = ncsi_find_channel(np, id);
320 	if (tmp) {
321 		spin_unlock_irqrestore(&np->lock, flags);
322 		kfree(nc);
323 		return tmp;
324 	}
325 
326 	list_add_tail_rcu(&nc->node, &np->channels);
327 	np->channel_num++;
328 	spin_unlock_irqrestore(&np->lock, flags);
329 
330 	return nc;
331 }
332 
333 static void ncsi_remove_channel(struct ncsi_channel *nc)
334 {
335 	struct ncsi_package *np = nc->package;
336 	struct ncsi_channel_filter *ncf;
337 	unsigned long flags;
338 	int i;
339 
340 	/* Release filters */
341 	spin_lock_irqsave(&nc->lock, flags);
342 	for (i = 0; i < NCSI_FILTER_MAX; i++) {
343 		ncf = nc->filters[i];
344 		if (!ncf)
345 			continue;
346 
347 		nc->filters[i] = NULL;
348 		kfree(ncf);
349 	}
350 
351 	nc->state = NCSI_CHANNEL_INACTIVE;
352 	spin_unlock_irqrestore(&nc->lock, flags);
353 	ncsi_stop_channel_monitor(nc);
354 
355 	/* Remove and free channel */
356 	spin_lock_irqsave(&np->lock, flags);
357 	list_del_rcu(&nc->node);
358 	np->channel_num--;
359 	spin_unlock_irqrestore(&np->lock, flags);
360 
361 	kfree(nc);
362 }
363 
364 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
365 				       unsigned char id)
366 {
367 	struct ncsi_package *np;
368 
369 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
370 		if (np->id == id)
371 			return np;
372 	}
373 
374 	return NULL;
375 }
376 
377 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
378 				      unsigned char id)
379 {
380 	struct ncsi_package *np, *tmp;
381 	unsigned long flags;
382 
383 	np = kzalloc(sizeof(*np), GFP_ATOMIC);
384 	if (!np)
385 		return NULL;
386 
387 	np->id = id;
388 	np->ndp = ndp;
389 	spin_lock_init(&np->lock);
390 	INIT_LIST_HEAD(&np->channels);
391 
392 	spin_lock_irqsave(&ndp->lock, flags);
393 	tmp = ncsi_find_package(ndp, id);
394 	if (tmp) {
395 		spin_unlock_irqrestore(&ndp->lock, flags);
396 		kfree(np);
397 		return tmp;
398 	}
399 
400 	list_add_tail_rcu(&np->node, &ndp->packages);
401 	ndp->package_num++;
402 	spin_unlock_irqrestore(&ndp->lock, flags);
403 
404 	return np;
405 }
406 
407 void ncsi_remove_package(struct ncsi_package *np)
408 {
409 	struct ncsi_dev_priv *ndp = np->ndp;
410 	struct ncsi_channel *nc, *tmp;
411 	unsigned long flags;
412 
413 	/* Release all child channels */
414 	list_for_each_entry_safe(nc, tmp, &np->channels, node)
415 		ncsi_remove_channel(nc);
416 
417 	/* Remove and free package */
418 	spin_lock_irqsave(&ndp->lock, flags);
419 	list_del_rcu(&np->node);
420 	ndp->package_num--;
421 	spin_unlock_irqrestore(&ndp->lock, flags);
422 
423 	kfree(np);
424 }
425 
426 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
427 				   unsigned char id,
428 				   struct ncsi_package **np,
429 				   struct ncsi_channel **nc)
430 {
431 	struct ncsi_package *p;
432 	struct ncsi_channel *c;
433 
434 	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
435 	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
436 
437 	if (np)
438 		*np = p;
439 	if (nc)
440 		*nc = c;
441 }
442 
443 /* For two consecutive NCSI commands, the packet IDs shouldn't be the
444  * same. Otherwise, a stale response might be matched to the wrong
445  * command. So the available IDs are allocated in round-robin fashion.
446  */
447 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
448 					unsigned int req_flags)
449 {
450 	struct ncsi_request *nr = NULL;
451 	int i, limit = ARRAY_SIZE(ndp->requests);
452 	unsigned long flags;
453 
454 	/* Check for an available request slot from the cursor up to the ceiling */
455 	spin_lock_irqsave(&ndp->lock, flags);
456 	for (i = ndp->request_id; i < limit; i++) {
457 		if (ndp->requests[i].used)
458 			continue;
459 
460 		nr = &ndp->requests[i];
461 		nr->used = true;
462 		nr->flags = req_flags;
463 		ndp->request_id = i + 1;
464 		goto found;
465 	}
466 
467 	/* Fall back to searching from the starting cursor */
468 	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
469 		if (ndp->requests[i].used)
470 			continue;
471 
472 		nr = &ndp->requests[i];
473 		nr->used = true;
474 		nr->flags = req_flags;
475 		ndp->request_id = i + 1;
476 		goto found;
477 	}
478 
479 found:
480 	spin_unlock_irqrestore(&ndp->lock, flags);
481 	return nr;
482 }
483 
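/* Release a request slot and drop its command/response skbs. For
 * event-driven requests, kick the workqueue once the last pending
 * request has completed.
 */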
484 void ncsi_free_request(struct ncsi_request *nr)
485 {
486 	struct ncsi_dev_priv *ndp = nr->ndp;
487 	struct sk_buff *cmd, *rsp;
488 	unsigned long flags;
489 	bool driven;
490 
491 	if (nr->enabled) {
492 		nr->enabled = false;
493 		del_timer_sync(&nr->timer);
494 	}
495 
496 	spin_lock_irqsave(&ndp->lock, flags);
497 	cmd = nr->cmd;
498 	rsp = nr->rsp;
499 	nr->cmd = NULL;
500 	nr->rsp = NULL;
501 	nr->used = false;
502 	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
503 	spin_unlock_irqrestore(&ndp->lock, flags);
504 
505 	if (driven && cmd && --ndp->pending_req_num == 0)
506 		schedule_work(&ndp->work);
507 
508 	/* Release command and response */
509 	consume_skb(cmd);
510 	consume_skb(rsp);
511 }
512 
513 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
514 {
515 	struct ncsi_dev_priv *ndp;
516 
517 	NCSI_FOR_EACH_DEV(ndp) {
518 		if (ndp->ndev.dev == dev)
519 			return &ndp->ndev;
520 	}
521 
522 	return NULL;
523 }
524 
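/* Timer handler for a request that received no response in time. */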
525 static void ncsi_request_timeout(unsigned long data)
526 {
527 	struct ncsi_request *nr = (struct ncsi_request *)data;
528 	struct ncsi_dev_priv *ndp = nr->ndp;
529 	unsigned long flags;
530 
531 	/* If the request already has an associated response,
532 	 * let the response handler release it.
533 	 */
534 	spin_lock_irqsave(&ndp->lock, flags);
535 	nr->enabled = false;
536 	if (nr->rsp || !nr->cmd) {
537 		spin_unlock_irqrestore(&ndp->lock, flags);
538 		return;
539 	}
540 	spin_unlock_irqrestore(&ndp->lock, flags);
541 
542 	/* Release the request */
543 	ncsi_free_request(nr);
544 }
545 
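/* State machine suspending the active channel: select its package,
 * optionally refresh link states, disable the channel's network TX,
 * disable the channel, then deselect the package.
 */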
546 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
547 {
548 	struct ncsi_dev *nd = &ndp->ndev;
549 	struct ncsi_package *np = ndp->active_package;
550 	struct ncsi_channel *nc = ndp->active_channel;
551 	struct ncsi_cmd_arg nca;
552 	unsigned long flags;
553 	int ret;
554 
555 	nca.ndp = ndp;
556 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
557 	switch (nd->state) {
558 	case ncsi_dev_state_suspend:
559 		nd->state = ncsi_dev_state_suspend_select;
560 		/* Fall through */
561 	case ncsi_dev_state_suspend_select:
562 		ndp->pending_req_num = 1;
563 
564 		nca.type = NCSI_PKT_CMD_SP;
565 		nca.package = np->id;
566 		nca.channel = NCSI_RESERVED_CHANNEL;
567 		if (ndp->flags & NCSI_DEV_HWA)
568 			nca.bytes[0] = 0;
569 		else
570 			nca.bytes[0] = 1;
571 
572 		/* Retrieve the latest link states of the channels in the
573 		 * current package before the active channel fails over to
574 		 * another one, since we may select a different channel as
575 		 * the next active one. The channels' link states are the
576 		 * most important factor in that selection, so they must be
577 		 * accurate. Unfortunately, the link states of inactive
578 		 * channels can't be updated by LSC AENs in time.
579 		 */
580 		if (ndp->flags & NCSI_DEV_RESHUFFLE)
581 			nd->state = ncsi_dev_state_suspend_gls;
582 		else
583 			nd->state = ncsi_dev_state_suspend_dcnt;
584 		ret = ncsi_xmit_cmd(&nca);
585 		if (ret)
586 			goto error;
587 
588 		break;
589 	case ncsi_dev_state_suspend_gls:
590 		ndp->pending_req_num = np->channel_num;
591 
592 		nca.type = NCSI_PKT_CMD_GLS;
593 		nca.package = np->id;
594 
595 		nd->state = ncsi_dev_state_suspend_dcnt;
596 		NCSI_FOR_EACH_CHANNEL(np, nc) {
597 			nca.channel = nc->id;
598 			ret = ncsi_xmit_cmd(&nca);
599 			if (ret)
600 				goto error;
601 		}
602 
603 		break;
604 	case ncsi_dev_state_suspend_dcnt:
605 		ndp->pending_req_num = 1;
606 
607 		nca.type = NCSI_PKT_CMD_DCNT;
608 		nca.package = np->id;
609 		nca.channel = nc->id;
610 
611 		nd->state = ncsi_dev_state_suspend_dc;
612 		ret = ncsi_xmit_cmd(&nca);
613 		if (ret)
614 			goto error;
615 
616 		break;
617 	case ncsi_dev_state_suspend_dc:
618 		ndp->pending_req_num = 1;
619 
620 		nca.type = NCSI_PKT_CMD_DC;
621 		nca.package = np->id;
622 		nca.channel = nc->id;
623 		nca.bytes[0] = 1;
624 
625 		nd->state = ncsi_dev_state_suspend_deselect;
626 		ret = ncsi_xmit_cmd(&nca);
627 		if (ret)
628 			goto error;
629 
630 		break;
631 	case ncsi_dev_state_suspend_deselect:
632 		ndp->pending_req_num = 1;
633 
634 		nca.type = NCSI_PKT_CMD_DP;
635 		nca.package = np->id;
636 		nca.channel = NCSI_RESERVED_CHANNEL;
637 
638 		nd->state = ncsi_dev_state_suspend_done;
639 		ret = ncsi_xmit_cmd(&nca);
640 		if (ret)
641 			goto error;
642 
643 		break;
644 	case ncsi_dev_state_suspend_done:
645 		spin_lock_irqsave(&nc->lock, flags);
646 		nc->state = NCSI_CHANNEL_INACTIVE;
647 		spin_unlock_irqrestore(&nc->lock, flags);
648 		ncsi_process_next_channel(ndp);
649 
650 		break;
651 	default:
652 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
653 			    nd->state);
654 	}
655 
656 	return;
657 error:
658 	nd->state = ncsi_dev_state_functional;
659 }
660 
661 /* Check the VLAN filter bitmap for a set filter, and construct a
662  * "Set VLAN Filter - Disable" packet if found.
663  */
664 static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
665 			 struct ncsi_cmd_arg *nca)
666 {
667 	int index;
668 	u32 *data;
669 	u16 vid;
670 
671 	index = ncsi_find_filter(nc, NCSI_FILTER_VLAN, NULL);
672 	if (index < 0) {
673 		/* Filter table empty */
674 		return -1;
675 	}
676 
677 	data = ncsi_get_filter(nc, NCSI_FILTER_VLAN, index);
678 	if (!data) {
679 		netdev_err(ndp->ndev.dev,
680 			   "ncsi: failed to retrieve filter %d\n", index);
681 		/* Set the VLAN id to 0 - this will still disable the entry in
682 		 * the filter table, but we won't know what it was.
683 		 */
684 		vid = 0;
685 	} else {
686 		vid = *(u16 *)data;
687 	}
688 
689 	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
690 		      "ncsi: removed vlan tag %u at index %d\n",
691 		      vid, index + 1);
692 	ncsi_remove_filter(nc, NCSI_FILTER_VLAN, index);
693 
694 	nca->type = NCSI_PKT_CMD_SVF;
695 	nca->words[1] = vid;
696 	/* HW filter index starts at 1 */
697 	nca->bytes[6] = index + 1;
698 	nca->bytes[7] = 0x00;
699 	return 0;
700 }
701 
702 /* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
703  * packet.
704  */
705 static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
706 		       struct ncsi_cmd_arg *nca)
707 {
708 	struct vlan_vid *vlan = NULL;
709 	int index = 0;
710 
711 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
712 		index = ncsi_find_filter(nc, NCSI_FILTER_VLAN, &vlan->vid);
713 		if (index < 0) {
714 			/* New tag to add */
715 			netdev_printk(KERN_DEBUG, ndp->ndev.dev,
716 				      "ncsi: new vlan id to set: %u\n",
717 				      vlan->vid);
718 			break;
719 		}
720 		netdev_printk(KERN_DEBUG, ndp->ndev.dev,
721 			      "vid %u already at filter pos %d\n",
722 			      vlan->vid, index);
723 	}
724 
725 	if (!vlan || index >= 0) {
726 		netdev_printk(KERN_DEBUG, ndp->ndev.dev,
727 			      "no vlan ids left to set\n");
728 		return -1;
729 	}
730 
731 	index = ncsi_add_filter(nc, NCSI_FILTER_VLAN, &vlan->vid);
732 	if (index < 0) {
733 		netdev_err(ndp->ndev.dev,
734 			   "Failed to add new VLAN tag, error %d\n", index);
735 		return -1;
736 	}
737 
738 	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
739 		      "ncsi: set vid %u in packet, index %u\n",
740 		      vlan->vid, index + 1);
741 	nca->type = NCSI_PKT_CMD_SVF;
742 	nca->words[1] = vlan->vid;
743 	/* HW filter index starts at 1 */
744 	nca->bytes[6] = index + 1;
745 	nca->bytes[7] = 0x01;
746 
747 	return 0;
748 }
749 
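/* State machine bringing the active channel up: select its package,
 * clear the channel's initial state, program the VLAN/MAC/broadcast
 * filters, enable the channel and AENs, then check the link status.
 */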
750 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
751 {
752 	struct ncsi_dev *nd = &ndp->ndev;
753 	struct net_device *dev = nd->dev;
754 	struct ncsi_package *np = ndp->active_package;
755 	struct ncsi_channel *nc = ndp->active_channel;
756 	struct ncsi_channel *hot_nc = NULL;
757 	struct ncsi_cmd_arg nca;
758 	unsigned char index;
759 	unsigned long flags;
760 	int ret;
761 
762 	nca.ndp = ndp;
763 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
764 	switch (nd->state) {
765 	case ncsi_dev_state_config:
766 	case ncsi_dev_state_config_sp:
767 		ndp->pending_req_num = 1;
768 
769 		/* Select the specific package */
770 		nca.type = NCSI_PKT_CMD_SP;
771 		if (ndp->flags & NCSI_DEV_HWA)
772 			nca.bytes[0] = 0;
773 		else
774 			nca.bytes[0] = 1;
775 		nca.package = np->id;
776 		nca.channel = NCSI_RESERVED_CHANNEL;
777 		ret = ncsi_xmit_cmd(&nca);
778 		if (ret)
779 			goto error;
780 
781 		nd->state = ncsi_dev_state_config_cis;
782 		break;
783 	case ncsi_dev_state_config_cis:
784 		ndp->pending_req_num = 1;
785 
786 		/* Clear initial state */
787 		nca.type = NCSI_PKT_CMD_CIS;
788 		nca.package = np->id;
789 		nca.channel = nc->id;
790 		ret = ncsi_xmit_cmd(&nca);
791 		if (ret)
792 			goto error;
793 
794 		nd->state = ncsi_dev_state_config_clear_vids;
795 		break;
796 	case ncsi_dev_state_config_clear_vids:
797 	case ncsi_dev_state_config_svf:
798 	case ncsi_dev_state_config_ev:
799 	case ncsi_dev_state_config_sma:
800 	case ncsi_dev_state_config_ebf:
801 #if IS_ENABLED(CONFIG_IPV6)
802 	case ncsi_dev_state_config_egmf:
803 #endif
804 	case ncsi_dev_state_config_ecnt:
805 	case ncsi_dev_state_config_ec:
806 	case ncsi_dev_state_config_ae:
807 	case ncsi_dev_state_config_gls:
808 		ndp->pending_req_num = 1;
809 
810 		nca.package = np->id;
811 		nca.channel = nc->id;
812 
813 		/* Clear any active filters on the channel before setting */
814 		if (nd->state == ncsi_dev_state_config_clear_vids) {
815 			ret = clear_one_vid(ndp, nc, &nca);
816 			if (ret) {
817 				nd->state = ncsi_dev_state_config_svf;
818 				schedule_work(&ndp->work);
819 				break;
820 			}
821 			/* Repeat */
822 			nd->state = ncsi_dev_state_config_clear_vids;
823 		/* Add known VLAN tags to the filter */
824 		} else if (nd->state == ncsi_dev_state_config_svf) {
825 			ret = set_one_vid(ndp, nc, &nca);
826 			if (ret) {
827 				nd->state = ncsi_dev_state_config_ev;
828 				schedule_work(&ndp->work);
829 				break;
830 			}
831 			/* Repeat */
832 			nd->state = ncsi_dev_state_config_svf;
833 		/* Enable/Disable the VLAN filter */
834 		} else if (nd->state == ncsi_dev_state_config_ev) {
835 			if (list_empty(&ndp->vlan_vids)) {
836 				nca.type = NCSI_PKT_CMD_DV;
837 			} else {
838 				nca.type = NCSI_PKT_CMD_EV;
839 				nca.bytes[3] = NCSI_CAP_VLAN_NO;
840 			}
841 			nd->state = ncsi_dev_state_config_sma;
842 		} else if (nd->state == ncsi_dev_state_config_sma) {
843 			/* Use first entry in unicast filter table. Note that
844 			 * the MAC filter table starts from entry 1 instead of
845 			 * 0.
846 			 */
847 			nca.type = NCSI_PKT_CMD_SMA;
848 			for (index = 0; index < 6; index++)
849 				nca.bytes[index] = dev->dev_addr[index];
850 			nca.bytes[6] = 0x1;
851 			nca.bytes[7] = 0x1;
852 			nd->state = ncsi_dev_state_config_ebf;
853 		} else if (nd->state == ncsi_dev_state_config_ebf) {
854 			nca.type = NCSI_PKT_CMD_EBF;
855 			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
856 			nd->state = ncsi_dev_state_config_ecnt;
857 #if IS_ENABLED(CONFIG_IPV6)
858 			if (ndp->inet6_addr_num > 0 &&
859 			    (nc->caps[NCSI_CAP_GENERIC].cap &
860 			     NCSI_CAP_GENERIC_MC))
861 				nd->state = ncsi_dev_state_config_egmf;
862 			else
863 				nd->state = ncsi_dev_state_config_ecnt;
864 		} else if (nd->state == ncsi_dev_state_config_egmf) {
865 			nca.type = NCSI_PKT_CMD_EGMF;
866 			nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
867 			nd->state = ncsi_dev_state_config_ecnt;
868 #endif /* CONFIG_IPV6 */
869 		} else if (nd->state == ncsi_dev_state_config_ecnt) {
870 			nca.type = NCSI_PKT_CMD_ECNT;
871 			nd->state = ncsi_dev_state_config_ec;
872 		} else if (nd->state == ncsi_dev_state_config_ec) {
873 			/* Enable AEN if it's supported */
874 			nca.type = NCSI_PKT_CMD_EC;
875 			nd->state = ncsi_dev_state_config_ae;
876 			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
877 				nd->state = ncsi_dev_state_config_gls;
878 		} else if (nd->state == ncsi_dev_state_config_ae) {
879 			nca.type = NCSI_PKT_CMD_AE;
880 			nca.bytes[0] = 0;
881 			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
882 			nd->state = ncsi_dev_state_config_gls;
883 		} else if (nd->state == ncsi_dev_state_config_gls) {
884 			nca.type = NCSI_PKT_CMD_GLS;
885 			nd->state = ncsi_dev_state_config_done;
886 		}
887 
888 		ret = ncsi_xmit_cmd(&nca);
889 		if (ret)
890 			goto error;
891 		break;
892 	case ncsi_dev_state_config_done:
893 		spin_lock_irqsave(&nc->lock, flags);
894 		if (nc->reconfigure_needed) {
895 			/* This channel's configuration has been updated
896 			 * part-way during the config state - start the
897 			 * channel configuration over
898 			 */
899 			nc->reconfigure_needed = false;
900 			nc->state = NCSI_CHANNEL_INACTIVE;
901 			spin_unlock_irqrestore(&nc->lock, flags);
902 
903 			spin_lock_irqsave(&ndp->lock, flags);
904 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
905 			spin_unlock_irqrestore(&ndp->lock, flags);
906 
907 			netdev_printk(KERN_DEBUG, dev,
908 				      "Dirty NCSI channel state reset\n");
909 			ncsi_process_next_channel(ndp);
910 			break;
911 		}
912 
913 		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
914 			hot_nc = nc;
915 			nc->state = NCSI_CHANNEL_ACTIVE;
916 		} else {
917 			hot_nc = NULL;
918 			nc->state = NCSI_CHANNEL_INACTIVE;
919 		}
920 		spin_unlock_irqrestore(&nc->lock, flags);
921 
922 		/* Update the hot channel */
923 		spin_lock_irqsave(&ndp->lock, flags);
924 		ndp->hot_channel = hot_nc;
925 		spin_unlock_irqrestore(&ndp->lock, flags);
926 
927 		ncsi_start_channel_monitor(nc);
928 		ncsi_process_next_channel(ndp);
929 		break;
930 	default:
931 		netdev_warn(dev, "Wrong NCSI state 0x%x in config\n",
932 			    nd->state);
933 	}
934 
935 	return;
936 
937 error:
938 	ncsi_report_link(ndp, true);
939 }
940 
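/* Pick the next channel to configure, preferring the previous "hot"
 * channel and any inactive channel whose link is already up, and queue
 * it for processing.
 */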
941 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
942 {
943 	struct ncsi_package *np;
944 	struct ncsi_channel *nc, *found, *hot_nc;
945 	struct ncsi_channel_mode *ncm;
946 	unsigned long flags;
947 
948 	spin_lock_irqsave(&ndp->lock, flags);
949 	hot_nc = ndp->hot_channel;
950 	spin_unlock_irqrestore(&ndp->lock, flags);
951 
952 	/* The search stops once an inactive channel with its link up
953 	 * is found.
954 	 */
955 	found = NULL;
956 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
957 		NCSI_FOR_EACH_CHANNEL(np, nc) {
958 			spin_lock_irqsave(&nc->lock, flags);
959 
960 			if (!list_empty(&nc->link) ||
961 			    nc->state != NCSI_CHANNEL_INACTIVE) {
962 				spin_unlock_irqrestore(&nc->lock, flags);
963 				continue;
964 			}
965 
966 			if (!found)
967 				found = nc;
968 
969 			if (nc == hot_nc)
970 				found = nc;
971 
972 			ncm = &nc->modes[NCSI_MODE_LINK];
973 			if (ncm->data[2] & 0x1) {
974 				spin_unlock_irqrestore(&nc->lock, flags);
975 				found = nc;
976 				goto out;
977 			}
978 
979 			spin_unlock_irqrestore(&nc->lock, flags);
980 		}
981 	}
982 
983 	if (!found) {
984 		ncsi_report_link(ndp, true);
985 		return -ENODEV;
986 	}
987 
988 out:
989 	spin_lock_irqsave(&ndp->lock, flags);
990 	list_add_tail_rcu(&found->link, &ndp->channel_queue);
991 	spin_unlock_irqrestore(&ndp->lock, flags);
992 
993 	return ncsi_process_next_channel(ndp);
994 }
995 
996 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
997 {
998 	struct ncsi_package *np;
999 	struct ncsi_channel *nc;
1000 	unsigned int cap;
1001 
1002 	/* Hardware arbitration is disabled if any single channel
1003 	 * doesn't explicitly support it.
1004 	 */
1005 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1006 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1007 			cap = nc->caps[NCSI_CAP_GENERIC].cap;
1008 			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1009 			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1010 			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
1011 				ndp->flags &= ~NCSI_DEV_HWA;
1012 				return false;
1013 			}
1014 		}
1015 	}
1016 
1017 	ndp->flags |= NCSI_DEV_HWA;
1018 	return true;
1019 }
1020 
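/* With hardware arbitration every channel is configured rather than a
 * single active one, so queue all channels for processing.
 */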
1021 static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
1022 {
1023 	struct ncsi_package *np;
1024 	struct ncsi_channel *nc;
1025 	unsigned long flags;
1026 
1027 	/* Move all available channels to the processing queue */
1028 	spin_lock_irqsave(&ndp->lock, flags);
1029 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1030 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1031 			WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
1032 				     !list_empty(&nc->link));
1033 			ncsi_stop_channel_monitor(nc);
1034 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1035 		}
1036 	}
1037 	spin_unlock_irqrestore(&ndp->lock, flags);
1038 
1039 	/* In the extreme case we may have no channels at all */
1040 	if (list_empty(&ndp->channel_queue)) {
1041 		ncsi_report_link(ndp, false);
1042 		return -ENOENT;
1043 	}
1044 
1045 	return ncsi_process_next_channel(ndp);
1046 }
1047 
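/* State machine enumerating the topology: deselect and probe each
 * possible package, then query version, capabilities and link status
 * for every channel that was found.
 */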
1048 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1049 {
1050 	struct ncsi_dev *nd = &ndp->ndev;
1051 	struct ncsi_package *np;
1052 	struct ncsi_channel *nc;
1053 	struct ncsi_cmd_arg nca;
1054 	unsigned char index;
1055 	int ret;
1056 
1057 	nca.ndp = ndp;
1058 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1059 	switch (nd->state) {
1060 	case ncsi_dev_state_probe:
1061 		nd->state = ncsi_dev_state_probe_deselect;
1062 		/* Fall through */
1063 	case ncsi_dev_state_probe_deselect:
1064 		ndp->pending_req_num = 8;
1065 
1066 		/* Deselect all possible packages */
1067 		nca.type = NCSI_PKT_CMD_DP;
1068 		nca.channel = NCSI_RESERVED_CHANNEL;
1069 		for (index = 0; index < 8; index++) {
1070 			nca.package = index;
1071 			ret = ncsi_xmit_cmd(&nca);
1072 			if (ret)
1073 				goto error;
1074 		}
1075 
1076 		nd->state = ncsi_dev_state_probe_package;
1077 		break;
1078 	case ncsi_dev_state_probe_package:
1079 		ndp->pending_req_num = 16;
1080 
1081 		/* Select all possible packages */
1082 		nca.type = NCSI_PKT_CMD_SP;
1083 		nca.bytes[0] = 1;
1084 		nca.channel = NCSI_RESERVED_CHANNEL;
1085 		for (index = 0; index < 8; index++) {
1086 			nca.package = index;
1087 			ret = ncsi_xmit_cmd(&nca);
1088 			if (ret)
1089 				goto error;
1090 		}
1091 
1092 		/* Disable all possible packages */
1093 		nca.type = NCSI_PKT_CMD_DP;
1094 		for (index = 0; index < 8; index++) {
1095 			nca.package = index;
1096 			ret = ncsi_xmit_cmd(&nca);
1097 			if (ret)
1098 				goto error;
1099 		}
1100 
1101 		nd->state = ncsi_dev_state_probe_channel;
1102 		break;
1103 	case ncsi_dev_state_probe_channel:
1104 		if (!ndp->active_package)
1105 			ndp->active_package = list_first_or_null_rcu(
1106 				&ndp->packages, struct ncsi_package, node);
1107 		else if (list_is_last(&ndp->active_package->node,
1108 				      &ndp->packages))
1109 			ndp->active_package = NULL;
1110 		else
1111 			ndp->active_package = list_next_entry(
1112 				ndp->active_package, node);
1113 
1114 		/* All available packages and channels have been enumerated.
1115 		 * The enumeration happens only once, when the NCSI interface
1116 		 * is started, so we need to continue bringing the interface
1117 		 * up after the enumeration.
1118 		 *
1119 		 * We have to choose an active channel before configuring it.
1120 		 * Note that in the extreme case there may be no active
1121 		 * channel at all.
1122 		 */
1123 		if (!ndp->active_package) {
1124 			ndp->flags |= NCSI_DEV_PROBED;
1125 			if (ncsi_check_hwa(ndp))
1126 				ncsi_enable_hwa(ndp);
1127 			else
1128 				ncsi_choose_active_channel(ndp);
1129 			return;
1130 		}
1131 
1132 		/* Select the active package */
1133 		ndp->pending_req_num = 1;
1134 		nca.type = NCSI_PKT_CMD_SP;
1135 		nca.bytes[0] = 1;
1136 		nca.package = ndp->active_package->id;
1137 		nca.channel = NCSI_RESERVED_CHANNEL;
1138 		ret = ncsi_xmit_cmd(&nca);
1139 		if (ret)
1140 			goto error;
1141 
1142 		nd->state = ncsi_dev_state_probe_cis;
1143 		break;
1144 	case ncsi_dev_state_probe_cis:
1145 		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1146 
1147 		/* Clear initial state */
1148 		nca.type = NCSI_PKT_CMD_CIS;
1149 		nca.package = ndp->active_package->id;
1150 		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1151 			nca.channel = index;
1152 			ret = ncsi_xmit_cmd(&nca);
1153 			if (ret)
1154 				goto error;
1155 		}
1156 
1157 		nd->state = ncsi_dev_state_probe_gvi;
1158 		break;
1159 	case ncsi_dev_state_probe_gvi:
1160 	case ncsi_dev_state_probe_gc:
1161 	case ncsi_dev_state_probe_gls:
1162 		np = ndp->active_package;
1163 		ndp->pending_req_num = np->channel_num;
1164 
1165 		/* Retrieve version, capability or link status */
1166 		if (nd->state == ncsi_dev_state_probe_gvi)
1167 			nca.type = NCSI_PKT_CMD_GVI;
1168 		else if (nd->state == ncsi_dev_state_probe_gc)
1169 			nca.type = NCSI_PKT_CMD_GC;
1170 		else
1171 			nca.type = NCSI_PKT_CMD_GLS;
1172 
1173 		nca.package = np->id;
1174 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1175 			nca.channel = nc->id;
1176 			ret = ncsi_xmit_cmd(&nca);
1177 			if (ret)
1178 				goto error;
1179 		}
1180 
1181 		if (nd->state == ncsi_dev_state_probe_gvi)
1182 			nd->state = ncsi_dev_state_probe_gc;
1183 		else if (nd->state == ncsi_dev_state_probe_gc)
1184 			nd->state = ncsi_dev_state_probe_gls;
1185 		else
1186 			nd->state = ncsi_dev_state_probe_dp;
1187 		break;
1188 	case ncsi_dev_state_probe_dp:
1189 		ndp->pending_req_num = 1;
1190 
1191 		/* Deselect the active package */
1192 		nca.type = NCSI_PKT_CMD_DP;
1193 		nca.package = ndp->active_package->id;
1194 		nca.channel = NCSI_RESERVED_CHANNEL;
1195 		ret = ncsi_xmit_cmd(&nca);
1196 		if (ret)
1197 			goto error;
1198 
1199 		/* Scan channels in next package */
1200 		nd->state = ncsi_dev_state_probe_channel;
1201 		break;
1202 	default:
1203 		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
1204 			    nd->state);
1205 	}
1206 
1207 	return;
1208 error:
1209 	ncsi_report_link(ndp, true);
1210 }
1211 
1212 static void ncsi_dev_work(struct work_struct *work)
1213 {
1214 	struct ncsi_dev_priv *ndp = container_of(work,
1215 			struct ncsi_dev_priv, work);
1216 	struct ncsi_dev *nd = &ndp->ndev;
1217 
1218 	switch (nd->state & ncsi_dev_state_major) {
1219 	case ncsi_dev_state_probe:
1220 		ncsi_probe_channel(ndp);
1221 		break;
1222 	case ncsi_dev_state_suspend:
1223 		ncsi_suspend_channel(ndp);
1224 		break;
1225 	case ncsi_dev_state_config:
1226 		ncsi_configure_channel(ndp);
1227 		break;
1228 	default:
1229 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1230 			    nd->state);
1231 	}
1232 }
1233 
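/* Take the next channel off the processing queue and either configure
 * it (if inactive) or suspend it (if active). With an empty queue,
 * either reshuffle the channels or report the current link state.
 */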
1234 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1235 {
1236 	struct ncsi_channel *nc;
1237 	int old_state;
1238 	unsigned long flags;
1239 
1240 	spin_lock_irqsave(&ndp->lock, flags);
1241 	nc = list_first_or_null_rcu(&ndp->channel_queue,
1242 				    struct ncsi_channel, link);
1243 	if (!nc) {
1244 		spin_unlock_irqrestore(&ndp->lock, flags);
1245 		goto out;
1246 	}
1247 
1248 	list_del_init(&nc->link);
1249 	spin_unlock_irqrestore(&ndp->lock, flags);
1250 
1251 	spin_lock_irqsave(&nc->lock, flags);
1252 	old_state = nc->state;
1253 	nc->state = NCSI_CHANNEL_INVISIBLE;
1254 	spin_unlock_irqrestore(&nc->lock, flags);
1255 
1256 	ndp->active_channel = nc;
1257 	ndp->active_package = nc->package;
1258 
1259 	switch (old_state) {
1260 	case NCSI_CHANNEL_INACTIVE:
1261 		ndp->ndev.state = ncsi_dev_state_config;
1262 		ncsi_configure_channel(ndp);
1263 		break;
1264 	case NCSI_CHANNEL_ACTIVE:
1265 		ndp->ndev.state = ncsi_dev_state_suspend;
1266 		ncsi_suspend_channel(ndp);
1267 		break;
1268 	default:
1269 		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1270 			   old_state, nc->package->id, nc->id);
1271 		ncsi_report_link(ndp, false);
1272 		return -EINVAL;
1273 	}
1274 
1275 	return 0;
1276 
1277 out:
1278 	ndp->active_channel = NULL;
1279 	ndp->active_package = NULL;
1280 	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1281 		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1282 		return ncsi_choose_active_channel(ndp);
1283 	}
1284 
1285 	ncsi_report_link(ndp, false);
1286 	return -ENODEV;
1287 }
1288 
1289 #if IS_ENABLED(CONFIG_IPV6)
1290 static int ncsi_inet6addr_event(struct notifier_block *this,
1291 				unsigned long event, void *data)
1292 {
1293 	struct inet6_ifaddr *ifa = data;
1294 	struct net_device *dev = ifa->idev->dev;
1295 	struct ncsi_dev *nd = ncsi_find_dev(dev);
1296 	struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
1297 	struct ncsi_package *np;
1298 	struct ncsi_channel *nc;
1299 	struct ncsi_cmd_arg nca;
1300 	bool action;
1301 	int ret;
1302 
1303 	if (!ndp || (ipv6_addr_type(&ifa->addr) &
1304 	    (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
1305 		return NOTIFY_OK;
1306 
1307 	switch (event) {
1308 	case NETDEV_UP:
1309 		action = (++ndp->inet6_addr_num) == 1;
1310 		nca.type = NCSI_PKT_CMD_EGMF;
1311 		break;
1312 	case NETDEV_DOWN:
1313 		action = (--ndp->inet6_addr_num == 0);
1314 		nca.type = NCSI_PKT_CMD_DGMF;
1315 		break;
1316 	default:
1317 		return NOTIFY_OK;
1318 	}
1319 
1320 	/* We might not have an active channel or package yet. The
1321 	 * multicast filter required for IPv6 will be enabled once an
1322 	 * active channel or package has been chosen.
1323 	 */
1324 	np = ndp->active_package;
1325 	nc = ndp->active_channel;
1326 	if (!action || !np || !nc)
1327 		return NOTIFY_OK;
1328 
1329 	/* We needn't enable or disable it if the function isn't supported */
1330 	if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
1331 		return NOTIFY_OK;
1332 
1333 	nca.ndp = ndp;
1334 	nca.req_flags = 0;
1335 	nca.package = np->id;
1336 	nca.channel = nc->id;
1337 	nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
1338 	ret = ncsi_xmit_cmd(&nca);
1339 	if (ret) {
1340 		netdev_warn(dev, "Fail to %s global multicast filter (%d)\n",
1341 			    (event == NETDEV_UP) ? "enable" : "disable", ret);
1342 		return NOTIFY_DONE;
1343 	}
1344 
1345 	return NOTIFY_OK;
1346 }
1347 
1348 static struct notifier_block ncsi_inet6addr_notifier = {
1349 	.notifier_call = ncsi_inet6addr_event,
1350 };
1351 #endif /* CONFIG_IPV6 */
1352 
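/* Queue all active channels for reconfiguration (e.g. after a VLAN
 * filter change), marking busy channels dirty so they are reconfigured
 * once their current transition completes. Returns the number of
 * channels kicked.
 */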
1353 static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1354 {
1355 	struct ncsi_dev *nd = &ndp->ndev;
1356 	struct ncsi_channel *nc;
1357 	struct ncsi_package *np;
1358 	unsigned long flags;
1359 	unsigned int n = 0;
1360 
1361 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1362 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1363 			spin_lock_irqsave(&nc->lock, flags);
1364 
1365 			/* Channels may be busy, so mark them dirty instead
1366 			 * of kicking them if:
1367 			 * a) not ACTIVE (configured)
1368 			 * b) in the channel_queue (to be configured)
1369 			 * c) their ndev is in the config state
1370 			 */
1371 			if (nc->state != NCSI_CHANNEL_ACTIVE) {
1372 				if ((ndp->ndev.state & 0xff00) ==
1373 						ncsi_dev_state_config ||
1374 						!list_empty(&nc->link)) {
1375 					netdev_printk(KERN_DEBUG, nd->dev,
1376 						      "ncsi: channel %p marked dirty\n",
1377 						      nc);
1378 					nc->reconfigure_needed = true;
1379 				}
1380 				spin_unlock_irqrestore(&nc->lock, flags);
1381 				continue;
1382 			}
1383 
1384 			spin_unlock_irqrestore(&nc->lock, flags);
1385 
1386 			ncsi_stop_channel_monitor(nc);
1387 			spin_lock_irqsave(&nc->lock, flags);
1388 			nc->state = NCSI_CHANNEL_INACTIVE;
1389 			spin_unlock_irqrestore(&nc->lock, flags);
1390 
1391 			spin_lock_irqsave(&ndp->lock, flags);
1392 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1393 			spin_unlock_irqrestore(&ndp->lock, flags);
1394 
1395 			netdev_printk(KERN_DEBUG, nd->dev,
1396 				      "ncsi: kicked channel %p\n", nc);
1397 			n++;
1398 		}
1399 	}
1400 
1401 	return n;
1402 }
1403 
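/* Track a VLAN ID added on the interface and kick the channels so the
 * updated filter list is pushed to the NIC.
 */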
1404 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1405 {
1406 	struct ncsi_channel_filter *ncf;
1407 	struct ncsi_dev_priv *ndp;
1408 	unsigned int n_vids = 0;
1409 	struct vlan_vid *vlan;
1410 	struct ncsi_dev *nd;
1411 	bool found = false;
1412 
1413 	if (vid == 0)
1414 		return 0;
1415 
1416 	nd = ncsi_find_dev(dev);
1417 	if (!nd) {
1418 		netdev_warn(dev, "ncsi: No net_device?\n");
1419 		return 0;
1420 	}
1421 
1422 	ndp = TO_NCSI_DEV_PRIV(nd);
1423 	ncf = ndp->hot_channel->filters[NCSI_FILTER_VLAN];
1424 
1425 	/* Add the VLAN id to our internal list */
1426 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1427 		n_vids++;
1428 		if (vlan->vid == vid) {
1429 			netdev_printk(KERN_DEBUG, dev,
1430 				      "vid %u already registered\n", vid);
1431 			return 0;
1432 		}
1433 	}
1434 
1435 	if (n_vids >= ncf->total) {
1436 		netdev_info(dev,
1437 			    "NCSI Channel supports up to %u VLAN tags but %u are already set\n",
1438 			    ncf->total, n_vids);
1439 		return -EINVAL;
1440 	}
1441 
1442 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1443 	if (!vlan)
1444 		return -ENOMEM;
1445 
1446 	vlan->proto = proto;
1447 	vlan->vid = vid;
1448 	list_add_rcu(&vlan->list, &ndp->vlan_vids);
1449 
1450 	netdev_printk(KERN_DEBUG, dev, "Added new vid %u\n", vid);
1451 
1452 	found = ncsi_kick_channels(ndp) != 0;
1453 
1454 	return found ? ncsi_process_next_channel(ndp) : 0;
1455 }
1456 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1457 
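/* Drop a VLAN ID from the tracked list and kick the channels so the
 * filter is removed from the NIC.
 */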
1458 int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1459 {
1460 	struct vlan_vid *vlan, *tmp;
1461 	struct ncsi_dev_priv *ndp;
1462 	struct ncsi_dev *nd;
1463 	bool found = false;
1464 
1465 	if (vid == 0)
1466 		return 0;
1467 
1468 	nd = ncsi_find_dev(dev);
1469 	if (!nd) {
1470 		netdev_warn(dev, "ncsi: no net_device?\n");
1471 		return 0;
1472 	}
1473 
1474 	ndp = TO_NCSI_DEV_PRIV(nd);
1475 
1476 	/* Remove the VLAN id from our internal list */
1477 	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1478 		if (vlan->vid == vid) {
1479 			netdev_printk(KERN_DEBUG, dev,
1480 				      "vid %u found, removing\n", vid);
1481 			list_del_rcu(&vlan->list);
1482 			found = true;
1483 			kfree(vlan);
1484 		}
1485 
1486 	if (!found) {
1487 		netdev_err(dev, "ncsi: vid %u wasn't registered!\n", vid);
1488 		return -EINVAL;
1489 	}
1490 
1491 	found = ncsi_kick_channels(ndp) != 0;
1492 
1493 	return found ? ncsi_process_next_channel(ndp) : 0;
1494 }
1495 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
1496 
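/* Register @dev as an NCSI-managed interface: allocate the per-device
 * state, set up the request slots and the NCSI Rx packet handler, and
 * arrange for @handler to be called on link state changes.
 */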
1497 struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1498 				   void (*handler)(struct ncsi_dev *ndev))
1499 {
1500 	struct ncsi_dev_priv *ndp;
1501 	struct ncsi_dev *nd;
1502 	unsigned long flags;
1503 	int i;
1504 
1505 	/* Check if the device has been registered or not */
1506 	nd = ncsi_find_dev(dev);
1507 	if (nd)
1508 		return nd;
1509 
1510 	/* Create NCSI device */
1511 	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1512 	if (!ndp)
1513 		return NULL;
1514 
1515 	nd = &ndp->ndev;
1516 	nd->state = ncsi_dev_state_registered;
1517 	nd->dev = dev;
1518 	nd->handler = handler;
1519 	ndp->pending_req_num = 0;
1520 	INIT_LIST_HEAD(&ndp->channel_queue);
1521 	INIT_LIST_HEAD(&ndp->vlan_vids);
1522 	INIT_WORK(&ndp->work, ncsi_dev_work);
1523 
1524 	/* Initialize private NCSI device */
1525 	spin_lock_init(&ndp->lock);
1526 	INIT_LIST_HEAD(&ndp->packages);
1527 	ndp->request_id = NCSI_REQ_START_IDX;
1528 	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1529 		ndp->requests[i].id = i;
1530 		ndp->requests[i].ndp = ndp;
1531 		setup_timer(&ndp->requests[i].timer,
1532 			    ncsi_request_timeout,
1533 			    (unsigned long)&ndp->requests[i]);
1534 	}
1535 
1536 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1537 #if IS_ENABLED(CONFIG_IPV6)
1538 	ndp->inet6_addr_num = 0;
1539 	if (list_empty(&ncsi_dev_list))
1540 		register_inet6addr_notifier(&ncsi_inet6addr_notifier);
1541 #endif
1542 	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1543 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1544 
1545 	/* Register NCSI packet Rx handler */
1546 	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1547 	ndp->ptype.func = ncsi_rcv_rsp;
1548 	ndp->ptype.dev = dev;
1549 	dev_add_pack(&ndp->ptype);
1550 
1551 	return nd;
1552 }
1553 EXPORT_SYMBOL_GPL(ncsi_register_dev);
1554 
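/* Start the NCSI device: probe the topology on first use, otherwise
 * configure the channels to bring the link up.
 */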
1555 int ncsi_start_dev(struct ncsi_dev *nd)
1556 {
1557 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1558 	int ret;
1559 
1560 	if (nd->state != ncsi_dev_state_registered &&
1561 	    nd->state != ncsi_dev_state_functional)
1562 		return -ENOTTY;
1563 
1564 	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1565 		nd->state = ncsi_dev_state_probe;
1566 		schedule_work(&ndp->work);
1567 		return 0;
1568 	}
1569 
1570 	if (ndp->flags & NCSI_DEV_HWA)
1571 		ret = ncsi_enable_hwa(ndp);
1572 	else
1573 		ret = ncsi_choose_active_channel(ndp);
1574 
1575 	return ret;
1576 }
1577 EXPORT_SYMBOL_GPL(ncsi_start_dev);
1578 
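/* Stop all channel monitors, force every channel back to the inactive
 * state and report the link as down.
 */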
1579 void ncsi_stop_dev(struct ncsi_dev *nd)
1580 {
1581 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1582 	struct ncsi_package *np;
1583 	struct ncsi_channel *nc;
1584 	bool chained;
1585 	int old_state;
1586 	unsigned long flags;
1587 
1588 	/* Stop the channel monitor and reset channel's state */
1589 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1590 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1591 			ncsi_stop_channel_monitor(nc);
1592 
1593 			spin_lock_irqsave(&nc->lock, flags);
1594 			chained = !list_empty(&nc->link);
1595 			old_state = nc->state;
1596 			nc->state = NCSI_CHANNEL_INACTIVE;
1597 			spin_unlock_irqrestore(&nc->lock, flags);
1598 
1599 			WARN_ON_ONCE(chained ||
1600 				     old_state == NCSI_CHANNEL_INVISIBLE);
1601 		}
1602 	}
1603 
1604 	ncsi_report_link(ndp, true);
1605 }
1606 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1607 
1608 void ncsi_unregister_dev(struct ncsi_dev *nd)
1609 {
1610 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1611 	struct ncsi_package *np, *tmp;
1612 	unsigned long flags;
1613 
1614 	dev_remove_pack(&ndp->ptype);
1615 
1616 	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1617 		ncsi_remove_package(np);
1618 
1619 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1620 	list_del_rcu(&ndp->node);
1621 #if IS_ENABLED(CONFIG_IPV6)
1622 	if (list_empty(&ncsi_dev_list))
1623 		unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
1624 #endif
1625 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1626 
1627 	kfree(ndp);
1628 }
1629 EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
1630