xref: /openbmc/linux/net/ncsi/ncsi-manage.c (revision e0f6d1a5)
1 /*
2  * Copyright Gavin Shan, IBM Corporation 2016.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 
16 #include <net/ncsi.h>
17 #include <net/net_namespace.h>
18 #include <net/sock.h>
19 #include <net/addrconf.h>
20 #include <net/ipv6.h>
21 #include <net/if_inet6.h>
22 
23 #include "internal.h"
24 #include "ncsi-pkt.h"
25 #include "ncsi-netlink.h"
26 
27 LIST_HEAD(ncsi_dev_list);
28 DEFINE_SPINLOCK(ncsi_dev_lock);
29 
30 static inline int ncsi_filter_size(int table)
31 {
32 	int sizes[] = { 2, 6, 6, 6 };
33 
34 	BUILD_BUG_ON(ARRAY_SIZE(sizes) != NCSI_FILTER_MAX);
35 	if (table < NCSI_FILTER_BASE || table >= NCSI_FILTER_MAX)
36 		return -EINVAL;
37 
38 	return sizes[table];
39 }
40 
41 u32 *ncsi_get_filter(struct ncsi_channel *nc, int table, int index)
42 {
43 	struct ncsi_channel_filter *ncf;
44 	int size;
45 
46 	ncf = nc->filters[table];
47 	if (!ncf)
48 		return NULL;
49 
50 	size = ncsi_filter_size(table);
51 	if (size < 0)
52 		return NULL;
53 
54 	return ncf->data + size * index;
55 }
56 
57 /* Find the first active filter in a filter table that matches the given
58  * data parameter. If data is NULL, this returns the first active filter.
59  */
60 int ncsi_find_filter(struct ncsi_channel *nc, int table, void *data)
61 {
62 	struct ncsi_channel_filter *ncf;
63 	void *bitmap;
64 	int index, size;
65 	unsigned long flags;
66 
67 	ncf = nc->filters[table];
68 	if (!ncf)
69 		return -ENXIO;
70 
71 	size = ncsi_filter_size(table);
72 	if (size < 0)
73 		return size;
74 
75 	spin_lock_irqsave(&nc->lock, flags);
76 	bitmap = (void *)&ncf->bitmap;
77 	index = -1;
78 	while ((index = find_next_bit(bitmap, ncf->total, index + 1))
79 	       < ncf->total) {
80 		if (!data || !memcmp(ncf->data + size * index, data, size)) {
81 			spin_unlock_irqrestore(&nc->lock, flags);
82 			return index;
83 		}
84 	}
85 	spin_unlock_irqrestore(&nc->lock, flags);
86 
87 	return -ENOENT;
88 }
89 
90 int ncsi_add_filter(struct ncsi_channel *nc, int table, void *data)
91 {
92 	struct ncsi_channel_filter *ncf;
93 	int index, size;
94 	void *bitmap;
95 	unsigned long flags;
96 
97 	size = ncsi_filter_size(table);
98 	if (size < 0)
99 		return size;
100 
101 	index = ncsi_find_filter(nc, table, data);
102 	if (index >= 0)
103 		return index;
104 
105 	ncf = nc->filters[table];
106 	if (!ncf)
107 		return -ENODEV;
108 
109 	spin_lock_irqsave(&nc->lock, flags);
110 	bitmap = (void *)&ncf->bitmap;
111 	do {
112 		index = find_next_zero_bit(bitmap, ncf->total, 0);
113 		if (index >= ncf->total) {
114 			spin_unlock_irqrestore(&nc->lock, flags);
115 			return -ENOSPC;
116 		}
117 	} while (test_and_set_bit(index, bitmap));
118 
119 	memcpy(ncf->data + size * index, data, size);
120 	spin_unlock_irqrestore(&nc->lock, flags);
121 
122 	return index;
123 }
124 
125 int ncsi_remove_filter(struct ncsi_channel *nc, int table, int index)
126 {
127 	struct ncsi_channel_filter *ncf;
128 	int size;
129 	void *bitmap;
130 	unsigned long flags;
131 
132 	size = ncsi_filter_size(table);
133 	if (size < 0)
134 		return size;
135 
136 	ncf = nc->filters[table];
137 	if (!ncf || index >= ncf->total)
138 		return -ENODEV;
139 
140 	spin_lock_irqsave(&nc->lock, flags);
141 	bitmap = (void *)&ncf->bitmap;
142 	if (test_and_clear_bit(index, bitmap))
143 		memset(ncf->data + size * index, 0, size);
144 	spin_unlock_irqrestore(&nc->lock, flags);
145 
146 	return 0;
147 }
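
/* Usage sketch (illustrative only, not called anywhere in this file): the
 * filter helpers above compose as shown below.  "example_vid" is a
 * hypothetical 16-bit VLAN id such as the one set_one_vid() passes in.
 *
 *	u16 example_vid = 100;
 *	int index;
 *
 *	index = ncsi_add_filter(nc, NCSI_FILTER_VLAN, &example_vid);
 *	if (index < 0)
 *		return index;	// e.g. -ENOSPC when the table is full
 *
 *	index = ncsi_find_filter(nc, NCSI_FILTER_VLAN, &example_vid);
 *	if (index >= 0)
 *		ncsi_remove_filter(nc, NCSI_FILTER_VLAN, index);
 */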
148 
149 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
150 {
151 	struct ncsi_dev *nd = &ndp->ndev;
152 	struct ncsi_package *np;
153 	struct ncsi_channel *nc;
154 	unsigned long flags;
155 
156 	nd->state = ncsi_dev_state_functional;
157 	if (force_down) {
158 		nd->link_up = 0;
159 		goto report;
160 	}
161 
162 	nd->link_up = 0;
163 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
164 		NCSI_FOR_EACH_CHANNEL(np, nc) {
165 			spin_lock_irqsave(&nc->lock, flags);
166 
167 			if (!list_empty(&nc->link) ||
168 			    nc->state != NCSI_CHANNEL_ACTIVE) {
169 				spin_unlock_irqrestore(&nc->lock, flags);
170 				continue;
171 			}
172 
173 			if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
174 				spin_unlock_irqrestore(&nc->lock, flags);
175 				nd->link_up = 1;
176 				goto report;
177 			}
178 
179 			spin_unlock_irqrestore(&nc->lock, flags);
180 		}
181 	}
182 
183 report:
184 	nd->handler(nd);
185 }
186 
187 static void ncsi_channel_monitor(struct timer_list *t)
188 {
189 	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
190 	struct ncsi_package *np = nc->package;
191 	struct ncsi_dev_priv *ndp = np->ndp;
192 	struct ncsi_channel_mode *ncm;
193 	struct ncsi_cmd_arg nca;
194 	bool enabled, chained;
195 	unsigned int monitor_state;
196 	unsigned long flags;
197 	int state, ret;
198 
199 	spin_lock_irqsave(&nc->lock, flags);
200 	state = nc->state;
201 	chained = !list_empty(&nc->link);
202 	enabled = nc->monitor.enabled;
203 	monitor_state = nc->monitor.state;
204 	spin_unlock_irqrestore(&nc->lock, flags);
205 
206 	if (!enabled || chained) {
207 		ncsi_stop_channel_monitor(nc);
208 		return;
209 	}
210 	if (state != NCSI_CHANNEL_INACTIVE &&
211 	    state != NCSI_CHANNEL_ACTIVE) {
212 		ncsi_stop_channel_monitor(nc);
213 		return;
214 	}
215 
216 	switch (monitor_state) {
217 	case NCSI_CHANNEL_MONITOR_START:
218 	case NCSI_CHANNEL_MONITOR_RETRY:
219 		nca.ndp = ndp;
220 		nca.package = np->id;
221 		nca.channel = nc->id;
222 		nca.type = NCSI_PKT_CMD_GLS;
223 		nca.req_flags = 0;
224 		ret = ncsi_xmit_cmd(&nca);
225 		if (ret)
226 			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
227 				   ret);
228 		break;
229 	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
230 		break;
231 	default:
232 		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
233 			   nc->id);
234 		if (!(ndp->flags & NCSI_DEV_HWA)) {
235 			ncsi_report_link(ndp, true);
236 			ndp->flags |= NCSI_DEV_RESHUFFLE;
237 		}
238 
239 		ncsi_stop_channel_monitor(nc);
240 
241 		ncm = &nc->modes[NCSI_MODE_LINK];
242 		spin_lock_irqsave(&nc->lock, flags);
243 		nc->state = NCSI_CHANNEL_INVISIBLE;
244 		ncm->data[2] &= ~0x1;
245 		spin_unlock_irqrestore(&nc->lock, flags);
246 
247 		spin_lock_irqsave(&ndp->lock, flags);
248 		nc->state = NCSI_CHANNEL_ACTIVE;
249 		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
250 		spin_unlock_irqrestore(&ndp->lock, flags);
251 		ncsi_process_next_channel(ndp);
252 		return;
253 	}
254 
255 	spin_lock_irqsave(&nc->lock, flags);
256 	nc->monitor.state++;
257 	spin_unlock_irqrestore(&nc->lock, flags);
258 	mod_timer(&nc->monitor.timer, jiffies + HZ);
259 }
260 
261 void ncsi_start_channel_monitor(struct ncsi_channel *nc)
262 {
263 	unsigned long flags;
264 
265 	spin_lock_irqsave(&nc->lock, flags);
266 	WARN_ON_ONCE(nc->monitor.enabled);
267 	nc->monitor.enabled = true;
268 	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
269 	spin_unlock_irqrestore(&nc->lock, flags);
270 
271 	mod_timer(&nc->monitor.timer, jiffies + HZ);
272 }
273 
274 void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
275 {
276 	unsigned long flags;
277 
278 	spin_lock_irqsave(&nc->lock, flags);
279 	if (!nc->monitor.enabled) {
280 		spin_unlock_irqrestore(&nc->lock, flags);
281 		return;
282 	}
283 	nc->monitor.enabled = false;
284 	spin_unlock_irqrestore(&nc->lock, flags);
285 
286 	del_timer_sync(&nc->monitor.timer);
287 }
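
/* Lifecycle sketch (illustrative): once a channel has been configured, the
 * caller arms the monitor and ncsi_channel_monitor() then runs roughly once
 * per second off the timer:
 *
 *	ncsi_start_channel_monitor(nc);	// state = NCSI_CHANNEL_MONITOR_START
 *	// each tick sends a GLS command in the START/RETRY states, then the
 *	// state counter advances through the WAIT range;
 *	// a GLS response (handled in ncsi-rsp.c) is expected to rewind the
 *	// monitor state, while silence past WAIT_MAX takes the timeout path
 *	ncsi_stop_channel_monitor(nc);	// e.g. on suspend or channel removal
 */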
288 
289 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
290 				       unsigned char id)
291 {
292 	struct ncsi_channel *nc;
293 
294 	NCSI_FOR_EACH_CHANNEL(np, nc) {
295 		if (nc->id == id)
296 			return nc;
297 	}
298 
299 	return NULL;
300 }
301 
302 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
303 {
304 	struct ncsi_channel *nc, *tmp;
305 	int index;
306 	unsigned long flags;
307 
308 	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
309 	if (!nc)
310 		return NULL;
311 
312 	nc->id = id;
313 	nc->package = np;
314 	nc->state = NCSI_CHANNEL_INACTIVE;
315 	nc->monitor.enabled = false;
316 	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
317 	spin_lock_init(&nc->lock);
318 	INIT_LIST_HEAD(&nc->link);
319 	for (index = 0; index < NCSI_CAP_MAX; index++)
320 		nc->caps[index].index = index;
321 	for (index = 0; index < NCSI_MODE_MAX; index++)
322 		nc->modes[index].index = index;
323 
324 	spin_lock_irqsave(&np->lock, flags);
325 	tmp = ncsi_find_channel(np, id);
326 	if (tmp) {
327 		spin_unlock_irqrestore(&np->lock, flags);
328 		kfree(nc);
329 		return tmp;
330 	}
331 
332 	list_add_tail_rcu(&nc->node, &np->channels);
333 	np->channel_num++;
334 	spin_unlock_irqrestore(&np->lock, flags);
335 
336 	return nc;
337 }
338 
339 static void ncsi_remove_channel(struct ncsi_channel *nc)
340 {
341 	struct ncsi_package *np = nc->package;
342 	struct ncsi_channel_filter *ncf;
343 	unsigned long flags;
344 	int i;
345 
346 	/* Release filters */
347 	spin_lock_irqsave(&nc->lock, flags);
348 	for (i = 0; i < NCSI_FILTER_MAX; i++) {
349 		ncf = nc->filters[i];
350 		if (!ncf)
351 			continue;
352 
353 		nc->filters[i] = NULL;
354 		kfree(ncf);
355 	}
356 
357 	nc->state = NCSI_CHANNEL_INACTIVE;
358 	spin_unlock_irqrestore(&nc->lock, flags);
359 	ncsi_stop_channel_monitor(nc);
360 
361 	/* Remove and free channel */
362 	spin_lock_irqsave(&np->lock, flags);
363 	list_del_rcu(&nc->node);
364 	np->channel_num--;
365 	spin_unlock_irqrestore(&np->lock, flags);
366 
367 	kfree(nc);
368 }
369 
370 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
371 				       unsigned char id)
372 {
373 	struct ncsi_package *np;
374 
375 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
376 		if (np->id == id)
377 			return np;
378 	}
379 
380 	return NULL;
381 }
382 
383 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
384 				      unsigned char id)
385 {
386 	struct ncsi_package *np, *tmp;
387 	unsigned long flags;
388 
389 	np = kzalloc(sizeof(*np), GFP_ATOMIC);
390 	if (!np)
391 		return NULL;
392 
393 	np->id = id;
394 	np->ndp = ndp;
395 	spin_lock_init(&np->lock);
396 	INIT_LIST_HEAD(&np->channels);
397 
398 	spin_lock_irqsave(&ndp->lock, flags);
399 	tmp = ncsi_find_package(ndp, id);
400 	if (tmp) {
401 		spin_unlock_irqrestore(&ndp->lock, flags);
402 		kfree(np);
403 		return tmp;
404 	}
405 
406 	list_add_tail_rcu(&np->node, &ndp->packages);
407 	ndp->package_num++;
408 	spin_unlock_irqrestore(&ndp->lock, flags);
409 
410 	return np;
411 }
412 
413 void ncsi_remove_package(struct ncsi_package *np)
414 {
415 	struct ncsi_dev_priv *ndp = np->ndp;
416 	struct ncsi_channel *nc, *tmp;
417 	unsigned long flags;
418 
419 	/* Release all child channels */
420 	list_for_each_entry_safe(nc, tmp, &np->channels, node)
421 		ncsi_remove_channel(nc);
422 
423 	/* Remove and free package */
424 	spin_lock_irqsave(&ndp->lock, flags);
425 	list_del_rcu(&np->node);
426 	ndp->package_num--;
427 	spin_unlock_irqrestore(&ndp->lock, flags);
428 
429 	kfree(np);
430 }
431 
432 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
433 				   unsigned char id,
434 				   struct ncsi_package **np,
435 				   struct ncsi_channel **nc)
436 {
437 	struct ncsi_package *p;
438 	struct ncsi_channel *c;
439 
440 	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
441 	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
442 
443 	if (np)
444 		*np = p;
445 	if (nc)
446 		*nc = c;
447 }
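
/* Encoding note (assuming the NCSI_PACKAGE_INDEX()/NCSI_CHANNEL_INDEX()
 * macros from internal.h): the one-byte id packs the package index in its
 * upper bits and the channel index in its lower bits, for example:
 *
 *	unsigned char id = 0x21;
 *	// NCSI_PACKAGE_INDEX(id) == 1, NCSI_CHANNEL_INDEX(id) == 1
 *	ncsi_find_package_and_channel(ndp, id, &np, &nc);
 */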
448 
449 /* For two consecutive NCSI commands, the packet IDs shouldn't be the
450  * same. Otherwise, a stale response might be matched to the wrong
451  * command. So the available IDs are allocated in round-robin fashion.
452  */
453 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
454 					unsigned int req_flags)
455 {
456 	struct ncsi_request *nr = NULL;
457 	int i, limit = ARRAY_SIZE(ndp->requests);
458 	unsigned long flags;
459 
460 	/* Scan forward from the current cursor up to the ceiling for a free request */
461 	spin_lock_irqsave(&ndp->lock, flags);
462 	for (i = ndp->request_id; i < limit; i++) {
463 		if (ndp->requests[i].used)
464 			continue;
465 
466 		nr = &ndp->requests[i];
467 		nr->used = true;
468 		nr->flags = req_flags;
469 		ndp->request_id = i + 1;
470 		goto found;
471 	}
472 
473 	/* Fall back to scanning from the starting cursor */
474 	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
475 		if (ndp->requests[i].used)
476 			continue;
477 
478 		nr = &ndp->requests[i];
479 		nr->used = true;
480 		nr->flags = req_flags;
481 		ndp->request_id = i + 1;
482 		goto found;
483 	}
484 
485 found:
486 	spin_unlock_irqrestore(&ndp->lock, flags);
487 	return nr;
488 }
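
/* Usage sketch (illustrative): a command path such as ncsi_xmit_cmd() in
 * ncsi-cmd.c typically pairs ncsi_alloc_request() with ncsi_free_request()
 * (below); the round-robin cursor helps ensure consecutive commands don't
 * reuse the same request id.
 *
 *	struct ncsi_request *nr;
 *
 *	nr = ncsi_alloc_request(ndp, NCSI_REQ_FLAG_EVENT_DRIVEN);
 *	if (!nr)
 *		return -ENOMEM;	// every request slot is in use
 *	// ... build and transmit the command skb, arm nr->timer ...
 *	ncsi_free_request(nr);	// on error, or once the response is consumed
 */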
489 
490 void ncsi_free_request(struct ncsi_request *nr)
491 {
492 	struct ncsi_dev_priv *ndp = nr->ndp;
493 	struct sk_buff *cmd, *rsp;
494 	unsigned long flags;
495 	bool driven;
496 
497 	if (nr->enabled) {
498 		nr->enabled = false;
499 		del_timer_sync(&nr->timer);
500 	}
501 
502 	spin_lock_irqsave(&ndp->lock, flags);
503 	cmd = nr->cmd;
504 	rsp = nr->rsp;
505 	nr->cmd = NULL;
506 	nr->rsp = NULL;
507 	nr->used = false;
508 	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
509 	spin_unlock_irqrestore(&ndp->lock, flags);
510 
511 	if (driven && cmd && --ndp->pending_req_num == 0)
512 		schedule_work(&ndp->work);
513 
514 	/* Release command and response */
515 	consume_skb(cmd);
516 	consume_skb(rsp);
517 }
518 
519 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
520 {
521 	struct ncsi_dev_priv *ndp;
522 
523 	NCSI_FOR_EACH_DEV(ndp) {
524 		if (ndp->ndev.dev == dev)
525 			return &ndp->ndev;
526 	}
527 
528 	return NULL;
529 }
530 
531 static void ncsi_request_timeout(struct timer_list *t)
532 {
533 	struct ncsi_request *nr = from_timer(nr, t, timer);
534 	struct ncsi_dev_priv *ndp = nr->ndp;
535 	unsigned long flags;
536 
537 	/* If the request already has an associated response,
538 	 * let the response handler release it.
539 	 */
540 	spin_lock_irqsave(&ndp->lock, flags);
541 	nr->enabled = false;
542 	if (nr->rsp || !nr->cmd) {
543 		spin_unlock_irqrestore(&ndp->lock, flags);
544 		return;
545 	}
546 	spin_unlock_irqrestore(&ndp->lock, flags);
547 
548 	/* Release the request */
549 	ncsi_free_request(nr);
550 }
551 
552 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
553 {
554 	struct ncsi_dev *nd = &ndp->ndev;
555 	struct ncsi_package *np = ndp->active_package;
556 	struct ncsi_channel *nc = ndp->active_channel;
557 	struct ncsi_cmd_arg nca;
558 	unsigned long flags;
559 	int ret;
560 
561 	nca.ndp = ndp;
562 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
563 	switch (nd->state) {
564 	case ncsi_dev_state_suspend:
565 		nd->state = ncsi_dev_state_suspend_select;
566 		/* Fall through */
567 	case ncsi_dev_state_suspend_select:
568 		ndp->pending_req_num = 1;
569 
570 		nca.type = NCSI_PKT_CMD_SP;
571 		nca.package = np->id;
572 		nca.channel = NCSI_RESERVED_CHANNEL;
573 		if (ndp->flags & NCSI_DEV_HWA)
574 			nca.bytes[0] = 0;
575 		else
576 			nca.bytes[0] = 1;
577 
578 		/* Retrieve the latest link states of the channels in the
579 		 * current package when the active channel needs to fail
580 		 * over to another one, since we may well pick a different
581 		 * channel as the next active one. Link state is the most
582 		 * important factor in that selection, so it must be
583 		 * accurate. Unfortunately, the link states of inactive
584 		 * channels can't be updated by LSC AENs in time.
585 		 */
586 		if (ndp->flags & NCSI_DEV_RESHUFFLE)
587 			nd->state = ncsi_dev_state_suspend_gls;
588 		else
589 			nd->state = ncsi_dev_state_suspend_dcnt;
590 		ret = ncsi_xmit_cmd(&nca);
591 		if (ret)
592 			goto error;
593 
594 		break;
595 	case ncsi_dev_state_suspend_gls:
596 		ndp->pending_req_num = np->channel_num;
597 
598 		nca.type = NCSI_PKT_CMD_GLS;
599 		nca.package = np->id;
600 
601 		nd->state = ncsi_dev_state_suspend_dcnt;
602 		NCSI_FOR_EACH_CHANNEL(np, nc) {
603 			nca.channel = nc->id;
604 			ret = ncsi_xmit_cmd(&nca);
605 			if (ret)
606 				goto error;
607 		}
608 
609 		break;
610 	case ncsi_dev_state_suspend_dcnt:
611 		ndp->pending_req_num = 1;
612 
613 		nca.type = NCSI_PKT_CMD_DCNT;
614 		nca.package = np->id;
615 		nca.channel = nc->id;
616 
617 		nd->state = ncsi_dev_state_suspend_dc;
618 		ret = ncsi_xmit_cmd(&nca);
619 		if (ret)
620 			goto error;
621 
622 		break;
623 	case ncsi_dev_state_suspend_dc:
624 		ndp->pending_req_num = 1;
625 
626 		nca.type = NCSI_PKT_CMD_DC;
627 		nca.package = np->id;
628 		nca.channel = nc->id;
629 		nca.bytes[0] = 1;
630 
631 		nd->state = ncsi_dev_state_suspend_deselect;
632 		ret = ncsi_xmit_cmd(&nca);
633 		if (ret)
634 			goto error;
635 
636 		break;
637 	case ncsi_dev_state_suspend_deselect:
638 		ndp->pending_req_num = 1;
639 
640 		nca.type = NCSI_PKT_CMD_DP;
641 		nca.package = np->id;
642 		nca.channel = NCSI_RESERVED_CHANNEL;
643 
644 		nd->state = ncsi_dev_state_suspend_done;
645 		ret = ncsi_xmit_cmd(&nca);
646 		if (ret)
647 			goto error;
648 
649 		break;
650 	case ncsi_dev_state_suspend_done:
651 		spin_lock_irqsave(&nc->lock, flags);
652 		nc->state = NCSI_CHANNEL_INACTIVE;
653 		spin_unlock_irqrestore(&nc->lock, flags);
654 		ncsi_process_next_channel(ndp);
655 
656 		break;
657 	default:
658 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
659 			    nd->state);
660 	}
661 
662 	return;
663 error:
664 	nd->state = ncsi_dev_state_functional;
665 }
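
/* Summary of the suspend state machine above, as driven by repeated calls
 * from ncsi_dev_work() and the response handlers:
 *
 *	suspend -> suspend_select (SP)
 *	        -> [suspend_gls (GLS per channel), only when reshuffling]
 *	        -> suspend_dcnt (DCNT) -> suspend_dc (DC)
 *	        -> suspend_deselect (DP) -> suspend_done
 */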
666 
667 /* Check the VLAN filter bitmap for a set filter, and construct a
668  * "Set VLAN Filter - Disable" packet if found.
669  */
670 static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
671 			 struct ncsi_cmd_arg *nca)
672 {
673 	int index;
674 	u32 *data;
675 	u16 vid;
676 
677 	index = ncsi_find_filter(nc, NCSI_FILTER_VLAN, NULL);
678 	if (index < 0) {
679 		/* Filter table empty */
680 		return -1;
681 	}
682 
683 	data = ncsi_get_filter(nc, NCSI_FILTER_VLAN, index);
684 	if (!data) {
685 		netdev_err(ndp->ndev.dev,
686 			   "NCSI: failed to retrieve filter %d\n", index);
687 		/* Set the VLAN id to 0 - this will still disable the entry in
688 		 * the filter table, but we won't know what it was.
689 		 */
690 		vid = 0;
691 	} else {
692 		vid = *(u16 *)data;
693 	}
694 
695 	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
696 		      "NCSI: removed vlan tag %u at index %d\n",
697 		      vid, index + 1);
698 	ncsi_remove_filter(nc, NCSI_FILTER_VLAN, index);
699 
700 	nca->type = NCSI_PKT_CMD_SVF;
701 	nca->words[1] = vid;
702 	/* HW filter index starts at 1 */
703 	nca->bytes[6] = index + 1;
704 	nca->bytes[7] = 0x00;
705 	return 0;
706 }
707 
708 /* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
709  * packet.
710  */
711 static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
712 		       struct ncsi_cmd_arg *nca)
713 {
714 	struct vlan_vid *vlan = NULL;
715 	int index = 0;
716 
717 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
718 		index = ncsi_find_filter(nc, NCSI_FILTER_VLAN, &vlan->vid);
719 		if (index < 0) {
720 			/* New tag to add */
721 			netdev_printk(KERN_DEBUG, ndp->ndev.dev,
722 				      "NCSI: new vlan id to set: %u\n",
723 				      vlan->vid);
724 			break;
725 		}
726 		netdev_printk(KERN_DEBUG, ndp->ndev.dev,
727 			      "vid %u already at filter pos %d\n",
728 			      vlan->vid, index);
729 	}
730 
731 	if (!vlan || index >= 0) {
732 		netdev_printk(KERN_DEBUG, ndp->ndev.dev,
733 			      "no vlan ids left to set\n");
734 		return -1;
735 	}
736 
737 	index = ncsi_add_filter(nc, NCSI_FILTER_VLAN, &vlan->vid);
738 	if (index < 0) {
739 		netdev_err(ndp->ndev.dev,
740 			   "Failed to add new VLAN tag, error %d\n", index);
741 		if (index == -ENOSPC)
742 			netdev_err(ndp->ndev.dev,
743 				   "Channel %u already has all VLAN filters set\n",
744 				   nc->id);
745 		return -1;
746 	}
747 
748 	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
749 		      "NCSI: set vid %u in packet, index %u\n",
750 		      vlan->vid, index + 1);
751 	nca->type = NCSI_PKT_CMD_SVF;
752 	nca->words[1] = vlan->vid;
753 	/* HW filter index starts at 1 */
754 	nca->bytes[6] = index + 1;
755 	nca->bytes[7] = 0x01;
756 
757 	return 0;
758 }
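
/* Driving sketch (illustrative): ncsi_configure_channel() below calls
 * clear_one_vid() repeatedly in the config_clear_vids state, then
 * set_one_vid() repeatedly in the config_svf state.  Each call builds one
 * SVF command; a -1 return (nothing left to clear/set) advances the state
 * machine instead:
 *
 *	ret = clear_one_vid(ndp, nc, &nca);
 *	if (ret) {
 *		nd->state = ncsi_dev_state_config_svf;	// table emptied
 *		schedule_work(&ndp->work);
 *	}
 */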
759 
760 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
761 {
762 	struct ncsi_dev *nd = &ndp->ndev;
763 	struct net_device *dev = nd->dev;
764 	struct ncsi_package *np = ndp->active_package;
765 	struct ncsi_channel *nc = ndp->active_channel;
766 	struct ncsi_channel *hot_nc = NULL;
767 	struct ncsi_cmd_arg nca;
768 	unsigned char index;
769 	unsigned long flags;
770 	int ret;
771 
772 	nca.ndp = ndp;
773 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
774 	switch (nd->state) {
775 	case ncsi_dev_state_config:
776 	case ncsi_dev_state_config_sp:
777 		ndp->pending_req_num = 1;
778 
779 		/* Select the specific package */
780 		nca.type = NCSI_PKT_CMD_SP;
781 		if (ndp->flags & NCSI_DEV_HWA)
782 			nca.bytes[0] = 0;
783 		else
784 			nca.bytes[0] = 1;
785 		nca.package = np->id;
786 		nca.channel = NCSI_RESERVED_CHANNEL;
787 		ret = ncsi_xmit_cmd(&nca);
788 		if (ret) {
789 			netdev_err(ndp->ndev.dev,
790 				   "NCSI: Failed to transmit CMD_SP\n");
791 			goto error;
792 		}
793 
794 		nd->state = ncsi_dev_state_config_cis;
795 		break;
796 	case ncsi_dev_state_config_cis:
797 		ndp->pending_req_num = 1;
798 
799 		/* Clear initial state */
800 		nca.type = NCSI_PKT_CMD_CIS;
801 		nca.package = np->id;
802 		nca.channel = nc->id;
803 		ret = ncsi_xmit_cmd(&nca);
804 		if (ret) {
805 			netdev_err(ndp->ndev.dev,
806 				   "NCSI: Failed to transmit CMD_CIS\n");
807 			goto error;
808 		}
809 
810 		nd->state = ncsi_dev_state_config_clear_vids;
811 		break;
812 	case ncsi_dev_state_config_clear_vids:
813 	case ncsi_dev_state_config_svf:
814 	case ncsi_dev_state_config_ev:
815 	case ncsi_dev_state_config_sma:
816 	case ncsi_dev_state_config_ebf:
817 #if IS_ENABLED(CONFIG_IPV6)
818 	case ncsi_dev_state_config_egmf:
819 #endif
820 	case ncsi_dev_state_config_ecnt:
821 	case ncsi_dev_state_config_ec:
822 	case ncsi_dev_state_config_ae:
823 	case ncsi_dev_state_config_gls:
824 		ndp->pending_req_num = 1;
825 
826 		nca.package = np->id;
827 		nca.channel = nc->id;
828 
829 		/* Clear any active filters on the channel before setting */
830 		if (nd->state == ncsi_dev_state_config_clear_vids) {
831 			ret = clear_one_vid(ndp, nc, &nca);
832 			if (ret) {
833 				nd->state = ncsi_dev_state_config_svf;
834 				schedule_work(&ndp->work);
835 				break;
836 			}
837 			/* Repeat */
838 			nd->state = ncsi_dev_state_config_clear_vids;
839 		/* Add known VLAN tags to the filter */
840 		} else if (nd->state == ncsi_dev_state_config_svf) {
841 			ret = set_one_vid(ndp, nc, &nca);
842 			if (ret) {
843 				nd->state = ncsi_dev_state_config_ev;
844 				schedule_work(&ndp->work);
845 				break;
846 			}
847 			/* Repeat */
848 			nd->state = ncsi_dev_state_config_svf;
849 		/* Enable/Disable the VLAN filter */
850 		} else if (nd->state == ncsi_dev_state_config_ev) {
851 			if (list_empty(&ndp->vlan_vids)) {
852 				nca.type = NCSI_PKT_CMD_DV;
853 			} else {
854 				nca.type = NCSI_PKT_CMD_EV;
855 				nca.bytes[3] = NCSI_CAP_VLAN_NO;
856 			}
857 			nd->state = ncsi_dev_state_config_sma;
858 		} else if (nd->state == ncsi_dev_state_config_sma) {
859 			/* Use the first entry in the unicast filter table.
860 			 * Note that the MAC filter table starts from entry 1
861 			 * instead of 0.
862 			 */
863 			nca.type = NCSI_PKT_CMD_SMA;
864 			for (index = 0; index < 6; index++)
865 				nca.bytes[index] = dev->dev_addr[index];
866 			nca.bytes[6] = 0x1;
867 			nca.bytes[7] = 0x1;
868 			nd->state = ncsi_dev_state_config_ebf;
869 		} else if (nd->state == ncsi_dev_state_config_ebf) {
870 			nca.type = NCSI_PKT_CMD_EBF;
871 			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
872 			nd->state = ncsi_dev_state_config_ecnt;
873 #if IS_ENABLED(CONFIG_IPV6)
874 			if (ndp->inet6_addr_num > 0 &&
875 			    (nc->caps[NCSI_CAP_GENERIC].cap &
876 			     NCSI_CAP_GENERIC_MC))
877 				nd->state = ncsi_dev_state_config_egmf;
878 			else
879 				nd->state = ncsi_dev_state_config_ecnt;
880 		} else if (nd->state == ncsi_dev_state_config_egmf) {
881 			nca.type = NCSI_PKT_CMD_EGMF;
882 			nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
883 			nd->state = ncsi_dev_state_config_ecnt;
884 #endif /* CONFIG_IPV6 */
885 		} else if (nd->state == ncsi_dev_state_config_ecnt) {
886 			nca.type = NCSI_PKT_CMD_ECNT;
887 			nd->state = ncsi_dev_state_config_ec;
888 		} else if (nd->state == ncsi_dev_state_config_ec) {
889 			/* Enable AEN if it's supported */
890 			nca.type = NCSI_PKT_CMD_EC;
891 			nd->state = ncsi_dev_state_config_ae;
892 			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
893 				nd->state = ncsi_dev_state_config_gls;
894 		} else if (nd->state == ncsi_dev_state_config_ae) {
895 			nca.type = NCSI_PKT_CMD_AE;
896 			nca.bytes[0] = 0;
897 			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
898 			nd->state = ncsi_dev_state_config_gls;
899 		} else if (nd->state == ncsi_dev_state_config_gls) {
900 			nca.type = NCSI_PKT_CMD_GLS;
901 			nd->state = ncsi_dev_state_config_done;
902 		}
903 
904 		ret = ncsi_xmit_cmd(&nca);
905 		if (ret) {
906 			netdev_err(ndp->ndev.dev,
907 				   "NCSI: Failed to transmit CMD %x\n",
908 				   nca.type);
909 			goto error;
910 		}
911 		break;
912 	case ncsi_dev_state_config_done:
913 		netdev_printk(KERN_DEBUG, ndp->ndev.dev,
914 			      "NCSI: channel %u config done\n", nc->id);
915 		spin_lock_irqsave(&nc->lock, flags);
916 		if (nc->reconfigure_needed) {
917 			/* This channel's configuration has been updated
918 			 * part-way during the config state - start the
919 			 * channel configuration over
920 			 */
921 			nc->reconfigure_needed = false;
922 			nc->state = NCSI_CHANNEL_INACTIVE;
923 			spin_unlock_irqrestore(&nc->lock, flags);
924 
925 			spin_lock_irqsave(&ndp->lock, flags);
926 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
927 			spin_unlock_irqrestore(&ndp->lock, flags);
928 
929 			netdev_printk(KERN_DEBUG, dev,
930 				      "Dirty NCSI channel state reset\n");
931 			ncsi_process_next_channel(ndp);
932 			break;
933 		}
934 
935 		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
936 			hot_nc = nc;
937 			nc->state = NCSI_CHANNEL_ACTIVE;
938 		} else {
939 			hot_nc = NULL;
940 			nc->state = NCSI_CHANNEL_INACTIVE;
941 			netdev_warn(ndp->ndev.dev,
942 				    "NCSI: channel %u link down after config\n",
943 				    nc->id);
944 		}
945 		spin_unlock_irqrestore(&nc->lock, flags);
946 
947 		/* Update the hot channel */
948 		spin_lock_irqsave(&ndp->lock, flags);
949 		ndp->hot_channel = hot_nc;
950 		spin_unlock_irqrestore(&ndp->lock, flags);
951 
952 		ncsi_start_channel_monitor(nc);
953 		ncsi_process_next_channel(ndp);
954 		break;
955 	default:
956 		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
957 			     nd->state);
958 	}
959 
960 	return;
961 
962 error:
963 	ncsi_report_link(ndp, true);
964 }
965 
966 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
967 {
968 	struct ncsi_package *np, *force_package;
969 	struct ncsi_channel *nc, *found, *hot_nc, *force_channel;
970 	struct ncsi_channel_mode *ncm;
971 	unsigned long flags;
972 
973 	spin_lock_irqsave(&ndp->lock, flags);
974 	hot_nc = ndp->hot_channel;
975 	force_channel = ndp->force_channel;
976 	force_package = ndp->force_package;
977 	spin_unlock_irqrestore(&ndp->lock, flags);
978 
979 	/* Force a specific channel, whether or not it has link, if we have
980 	 * been configured to do so
981 	 */
982 	if (force_package && force_channel) {
983 		found = force_channel;
984 		ncm = &found->modes[NCSI_MODE_LINK];
985 		if (!(ncm->data[2] & 0x1))
986 			netdev_info(ndp->ndev.dev,
987 				    "NCSI: Channel %u forced, but it is link down\n",
988 				    found->id);
989 		goto out;
990 	}
991 
992 	/* The search stops as soon as an inactive channel with its link
993 	 * up is found.
994 	 */
995 	found = NULL;
996 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
997 		if (ndp->force_package && np != ndp->force_package)
998 			continue;
999 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1000 			spin_lock_irqsave(&nc->lock, flags);
1001 
1002 			if (!list_empty(&nc->link) ||
1003 			    nc->state != NCSI_CHANNEL_INACTIVE) {
1004 				spin_unlock_irqrestore(&nc->lock, flags);
1005 				continue;
1006 			}
1007 
1008 			if (!found)
1009 				found = nc;
1010 
1011 			if (nc == hot_nc)
1012 				found = nc;
1013 
1014 			ncm = &nc->modes[NCSI_MODE_LINK];
1015 			if (ncm->data[2] & 0x1) {
1016 				spin_unlock_irqrestore(&nc->lock, flags);
1017 				found = nc;
1018 				goto out;
1019 			}
1020 
1021 			spin_unlock_irqrestore(&nc->lock, flags);
1022 		}
1023 	}
1024 
1025 	if (!found) {
1026 		netdev_warn(ndp->ndev.dev,
1027 			    "NCSI: No channel found with link\n");
1028 		ncsi_report_link(ndp, true);
1029 		return -ENODEV;
1030 	}
1031 
1032 	ncm = &found->modes[NCSI_MODE_LINK];
1033 	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
1034 		      "NCSI: Channel %u added to queue (link %s)\n",
1035 		      found->id, ncm->data[2] & 0x1 ? "up" : "down");
1036 
1037 out:
1038 	spin_lock_irqsave(&ndp->lock, flags);
1039 	list_add_tail_rcu(&found->link, &ndp->channel_queue);
1040 	spin_unlock_irqrestore(&ndp->lock, flags);
1041 
1042 	return ncsi_process_next_channel(ndp);
1043 }
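
/* Selection priority in ncsi_choose_active_channel() above: a forced
 * package/channel wins outright; otherwise the first inactive channel with
 * link up is taken, then the previous "hot" channel, then simply the first
 * inactive channel found.
 */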
1044 
1045 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1046 {
1047 	struct ncsi_package *np;
1048 	struct ncsi_channel *nc;
1049 	unsigned int cap;
1050 	bool has_channel = false;
1051 
1052 	/* Hardware arbitration is disabled if any one channel doesn't
1053 	 * explicitly advertise support for it.
1054 	 */
1055 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1056 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1057 			has_channel = true;
1058 
1059 			cap = nc->caps[NCSI_CAP_GENERIC].cap;
1060 			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1061 			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1062 			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
1063 				ndp->flags &= ~NCSI_DEV_HWA;
1064 				return false;
1065 			}
1066 		}
1067 	}
1068 
1069 	if (has_channel) {
1070 		ndp->flags |= NCSI_DEV_HWA;
1071 		return true;
1072 	}
1073 
1074 	ndp->flags &= ~NCSI_DEV_HWA;
1075 	return false;
1076 }
1077 
1078 static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
1079 {
1080 	struct ncsi_package *np;
1081 	struct ncsi_channel *nc;
1082 	unsigned long flags;
1083 
1084 	/* Move all available channels to processing queue */
1085 	spin_lock_irqsave(&ndp->lock, flags);
1086 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1087 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1088 			WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
1089 				     !list_empty(&nc->link));
1090 			ncsi_stop_channel_monitor(nc);
1091 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1092 		}
1093 	}
1094 	spin_unlock_irqrestore(&ndp->lock, flags);
1095 
1096 	/* In the extreme case we may have no channels at all */
1097 	if (list_empty(&ndp->channel_queue)) {
1098 		netdev_err(ndp->ndev.dev,
1099 			   "NCSI: No available channels for HWA\n");
1100 		ncsi_report_link(ndp, false);
1101 		return -ENOENT;
1102 	}
1103 
1104 	return ncsi_process_next_channel(ndp);
1105 }
1106 
1107 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1108 {
1109 	struct ncsi_dev *nd = &ndp->ndev;
1110 	struct ncsi_package *np;
1111 	struct ncsi_channel *nc;
1112 	struct ncsi_cmd_arg nca;
1113 	unsigned char index;
1114 	int ret;
1115 
1116 	nca.ndp = ndp;
1117 	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1118 	switch (nd->state) {
1119 	case ncsi_dev_state_probe:
1120 		nd->state = ncsi_dev_state_probe_deselect;
1121 		/* Fall through */
1122 	case ncsi_dev_state_probe_deselect:
1123 		ndp->pending_req_num = 8;
1124 
1125 		/* Deselect all possible packages */
1126 		nca.type = NCSI_PKT_CMD_DP;
1127 		nca.channel = NCSI_RESERVED_CHANNEL;
1128 		for (index = 0; index < 8; index++) {
1129 			nca.package = index;
1130 			ret = ncsi_xmit_cmd(&nca);
1131 			if (ret)
1132 				goto error;
1133 		}
1134 
1135 		nd->state = ncsi_dev_state_probe_package;
1136 		break;
1137 	case ncsi_dev_state_probe_package:
1138 		ndp->pending_req_num = 16;
1139 
1140 		/* Select all possible packages */
1141 		nca.type = NCSI_PKT_CMD_SP;
1142 		nca.bytes[0] = 1;
1143 		nca.channel = NCSI_RESERVED_CHANNEL;
1144 		for (index = 0; index < 8; index++) {
1145 			nca.package = index;
1146 			ret = ncsi_xmit_cmd(&nca);
1147 			if (ret)
1148 				goto error;
1149 		}
1150 
1151 		/* Deselect all possible packages again */
1152 		nca.type = NCSI_PKT_CMD_DP;
1153 		for (index = 0; index < 8; index++) {
1154 			nca.package = index;
1155 			ret = ncsi_xmit_cmd(&nca);
1156 			if (ret)
1157 				goto error;
1158 		}
1159 
1160 		nd->state = ncsi_dev_state_probe_channel;
1161 		break;
1162 	case ncsi_dev_state_probe_channel:
1163 		if (!ndp->active_package)
1164 			ndp->active_package = list_first_or_null_rcu(
1165 				&ndp->packages, struct ncsi_package, node);
1166 		else if (list_is_last(&ndp->active_package->node,
1167 				      &ndp->packages))
1168 			ndp->active_package = NULL;
1169 		else
1170 			ndp->active_package = list_next_entry(
1171 				ndp->active_package, node);
1172 
1173 		/* All available packages and channels have been enumerated.
1174 		 * The enumeration happens only once, when the NCSI interface
1175 		 * is started, so we must continue bringing up the interface
1176 		 * after the enumeration completes.
1177 		 *
1178 		 * We have to choose an active channel before configuring it.
1179 		 * Note that in the extreme case we may have no active channel
1180 		 * at all.
1181 		 */
1182 		if (!ndp->active_package) {
1183 			ndp->flags |= NCSI_DEV_PROBED;
1184 			if (ncsi_check_hwa(ndp))
1185 				ncsi_enable_hwa(ndp);
1186 			else
1187 				ncsi_choose_active_channel(ndp);
1188 			return;
1189 		}
1190 
1191 		/* Select the active package */
1192 		ndp->pending_req_num = 1;
1193 		nca.type = NCSI_PKT_CMD_SP;
1194 		nca.bytes[0] = 1;
1195 		nca.package = ndp->active_package->id;
1196 		nca.channel = NCSI_RESERVED_CHANNEL;
1197 		ret = ncsi_xmit_cmd(&nca);
1198 		if (ret)
1199 			goto error;
1200 
1201 		nd->state = ncsi_dev_state_probe_cis;
1202 		break;
1203 	case ncsi_dev_state_probe_cis:
1204 		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1205 
1206 		/* Clear initial state */
1207 		nca.type = NCSI_PKT_CMD_CIS;
1208 		nca.package = ndp->active_package->id;
1209 		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1210 			nca.channel = index;
1211 			ret = ncsi_xmit_cmd(&nca);
1212 			if (ret)
1213 				goto error;
1214 		}
1215 
1216 		nd->state = ncsi_dev_state_probe_gvi;
1217 		break;
1218 	case ncsi_dev_state_probe_gvi:
1219 	case ncsi_dev_state_probe_gc:
1220 	case ncsi_dev_state_probe_gls:
1221 		np = ndp->active_package;
1222 		ndp->pending_req_num = np->channel_num;
1223 
1224 		/* Retrieve version, capability or link status */
1225 		if (nd->state == ncsi_dev_state_probe_gvi)
1226 			nca.type = NCSI_PKT_CMD_GVI;
1227 		else if (nd->state == ncsi_dev_state_probe_gc)
1228 			nca.type = NCSI_PKT_CMD_GC;
1229 		else
1230 			nca.type = NCSI_PKT_CMD_GLS;
1231 
1232 		nca.package = np->id;
1233 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1234 			nca.channel = nc->id;
1235 			ret = ncsi_xmit_cmd(&nca);
1236 			if (ret)
1237 				goto error;
1238 		}
1239 
1240 		if (nd->state == ncsi_dev_state_probe_gvi)
1241 			nd->state = ncsi_dev_state_probe_gc;
1242 		else if (nd->state == ncsi_dev_state_probe_gc)
1243 			nd->state = ncsi_dev_state_probe_gls;
1244 		else
1245 			nd->state = ncsi_dev_state_probe_dp;
1246 		break;
1247 	case ncsi_dev_state_probe_dp:
1248 		ndp->pending_req_num = 1;
1249 
1250 		/* Deselect the active package */
1251 		nca.type = NCSI_PKT_CMD_DP;
1252 		nca.package = ndp->active_package->id;
1253 		nca.channel = NCSI_RESERVED_CHANNEL;
1254 		ret = ncsi_xmit_cmd(&nca);
1255 		if (ret)
1256 			goto error;
1257 
1258 		/* Scan channels in next package */
1259 		nd->state = ncsi_dev_state_probe_channel;
1260 		break;
1261 	default:
1262 		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
1263 			    nd->state);
1264 	}
1265 
1266 	return;
1267 error:
1268 	netdev_err(ndp->ndev.dev,
1269 		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
1270 		   nca.type);
1271 	ncsi_report_link(ndp, true);
1272 }
1273 
1274 static void ncsi_dev_work(struct work_struct *work)
1275 {
1276 	struct ncsi_dev_priv *ndp = container_of(work,
1277 			struct ncsi_dev_priv, work);
1278 	struct ncsi_dev *nd = &ndp->ndev;
1279 
1280 	switch (nd->state & ncsi_dev_state_major) {
1281 	case ncsi_dev_state_probe:
1282 		ncsi_probe_channel(ndp);
1283 		break;
1284 	case ncsi_dev_state_suspend:
1285 		ncsi_suspend_channel(ndp);
1286 		break;
1287 	case ncsi_dev_state_config:
1288 		ncsi_configure_channel(ndp);
1289 		break;
1290 	default:
1291 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1292 			    nd->state);
1293 	}
1294 }
1295 
1296 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1297 {
1298 	struct ncsi_channel *nc;
1299 	int old_state;
1300 	unsigned long flags;
1301 
1302 	spin_lock_irqsave(&ndp->lock, flags);
1303 	nc = list_first_or_null_rcu(&ndp->channel_queue,
1304 				    struct ncsi_channel, link);
1305 	if (!nc) {
1306 		spin_unlock_irqrestore(&ndp->lock, flags);
1307 		goto out;
1308 	}
1309 
1310 	list_del_init(&nc->link);
1311 	spin_unlock_irqrestore(&ndp->lock, flags);
1312 
1313 	spin_lock_irqsave(&nc->lock, flags);
1314 	old_state = nc->state;
1315 	nc->state = NCSI_CHANNEL_INVISIBLE;
1316 	spin_unlock_irqrestore(&nc->lock, flags);
1317 
1318 	ndp->active_channel = nc;
1319 	ndp->active_package = nc->package;
1320 
1321 	switch (old_state) {
1322 	case NCSI_CHANNEL_INACTIVE:
1323 		ndp->ndev.state = ncsi_dev_state_config;
1324 		netdev_info(ndp->ndev.dev, "NCSI: configuring channel %u\n",
1325 			    nc->id);
1326 		ncsi_configure_channel(ndp);
1327 		break;
1328 	case NCSI_CHANNEL_ACTIVE:
1329 		ndp->ndev.state = ncsi_dev_state_suspend;
1330 		netdev_info(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1331 			    nc->id);
1332 		ncsi_suspend_channel(ndp);
1333 		break;
1334 	default:
1335 		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1336 			   old_state, nc->package->id, nc->id);
1337 		ncsi_report_link(ndp, false);
1338 		return -EINVAL;
1339 	}
1340 
1341 	return 0;
1342 
1343 out:
1344 	ndp->active_channel = NULL;
1345 	ndp->active_package = NULL;
1346 	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1347 		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1348 		return ncsi_choose_active_channel(ndp);
1349 	}
1350 
1351 	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
1352 		      "NCSI: No more channels to process\n");
1353 	ncsi_report_link(ndp, false);
1354 	return -ENODEV;
1355 }
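
/* Scheduling note: a channel pulled off channel_queue in the INACTIVE state
 * is (re)configured, one pulled off in the ACTIVE state is suspended; any
 * other state is treated as an error and the link is reported down.
 */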
1356 
1357 #if IS_ENABLED(CONFIG_IPV6)
1358 static int ncsi_inet6addr_event(struct notifier_block *this,
1359 				unsigned long event, void *data)
1360 {
1361 	struct inet6_ifaddr *ifa = data;
1362 	struct net_device *dev = ifa->idev->dev;
1363 	struct ncsi_dev *nd = ncsi_find_dev(dev);
1364 	struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
1365 	struct ncsi_package *np;
1366 	struct ncsi_channel *nc;
1367 	struct ncsi_cmd_arg nca;
1368 	bool action;
1369 	int ret;
1370 
1371 	if (!ndp || (ipv6_addr_type(&ifa->addr) &
1372 	    (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
1373 		return NOTIFY_OK;
1374 
1375 	switch (event) {
1376 	case NETDEV_UP:
1377 		action = (++ndp->inet6_addr_num) == 1;
1378 		nca.type = NCSI_PKT_CMD_EGMF;
1379 		break;
1380 	case NETDEV_DOWN:
1381 		action = (--ndp->inet6_addr_num == 0);
1382 		nca.type = NCSI_PKT_CMD_DGMF;
1383 		break;
1384 	default:
1385 		return NOTIFY_OK;
1386 	}
1387 
1388 	/* We might not have an active channel or package yet. The
1389 	 * IPv6-required multicast filter will be enabled once an
1390 	 * active channel or package has been chosen.
1391 	 */
1392 	np = ndp->active_package;
1393 	nc = ndp->active_channel;
1394 	if (!action || !np || !nc)
1395 		return NOTIFY_OK;
1396 
1397 	/* There is nothing to enable or disable if the function isn't supported */
1398 	if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
1399 		return NOTIFY_OK;
1400 
1401 	nca.ndp = ndp;
1402 	nca.req_flags = 0;
1403 	nca.package = np->id;
1404 	nca.channel = nc->id;
1405 	nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
1406 	ret = ncsi_xmit_cmd(&nca);
1407 	if (ret) {
1408 		netdev_warn(dev, "Fail to %s global multicast filter (%d)\n",
1409 			    (event == NETDEV_UP) ? "enable" : "disable", ret);
1410 		return NOTIFY_DONE;
1411 	}
1412 
1413 	return NOTIFY_OK;
1414 }
1415 
1416 static struct notifier_block ncsi_inet6addr_notifier = {
1417 	.notifier_call = ncsi_inet6addr_event,
1418 };
1419 #endif /* CONFIG_IPV6 */
1420 
1421 static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1422 {
1423 	struct ncsi_dev *nd = &ndp->ndev;
1424 	struct ncsi_channel *nc;
1425 	struct ncsi_package *np;
1426 	unsigned long flags;
1427 	unsigned int n = 0;
1428 
1429 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1430 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1431 			spin_lock_irqsave(&nc->lock, flags);
1432 
1433 			/* Channels may be busy, so mark them dirty instead
1434 			 * of kicking them if:
1435 			 * a) not ACTIVE (configured)
1436 			 * b) in the channel_queue (to be configured)
1437 			 * c) their ndev is in the config state
1438 			 */
1439 			if (nc->state != NCSI_CHANNEL_ACTIVE) {
1440 				if ((ndp->ndev.state & 0xff00) ==
1441 						ncsi_dev_state_config ||
1442 						!list_empty(&nc->link)) {
1443 					netdev_printk(KERN_DEBUG, nd->dev,
1444 						      "NCSI: channel %p marked dirty\n",
1445 						      nc);
1446 					nc->reconfigure_needed = true;
1447 				}
1448 				spin_unlock_irqrestore(&nc->lock, flags);
1449 				continue;
1450 			}
1451 
1452 			spin_unlock_irqrestore(&nc->lock, flags);
1453 
1454 			ncsi_stop_channel_monitor(nc);
1455 			spin_lock_irqsave(&nc->lock, flags);
1456 			nc->state = NCSI_CHANNEL_INACTIVE;
1457 			spin_unlock_irqrestore(&nc->lock, flags);
1458 
1459 			spin_lock_irqsave(&ndp->lock, flags);
1460 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1461 			spin_unlock_irqrestore(&ndp->lock, flags);
1462 
1463 			netdev_printk(KERN_DEBUG, nd->dev,
1464 				      "NCSI: kicked channel %p\n", nc);
1465 			n++;
1466 		}
1467 	}
1468 
1469 	return n;
1470 }
1471 
1472 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1473 {
1474 	struct ncsi_dev_priv *ndp;
1475 	unsigned int n_vids = 0;
1476 	struct vlan_vid *vlan;
1477 	struct ncsi_dev *nd;
1478 	bool found = false;
1479 
1480 	if (vid == 0)
1481 		return 0;
1482 
1483 	nd = ncsi_find_dev(dev);
1484 	if (!nd) {
1485 		netdev_warn(dev, "NCSI: No net_device?\n");
1486 		return 0;
1487 	}
1488 
1489 	ndp = TO_NCSI_DEV_PRIV(nd);
1490 
1491 	/* Add the VLAN id to our internal list */
1492 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1493 		n_vids++;
1494 		if (vlan->vid == vid) {
1495 			netdev_printk(KERN_DEBUG, dev,
1496 				      "NCSI: vid %u already registered\n", vid);
1497 			return 0;
1498 		}
1499 	}
1500 	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1501 		netdev_warn(dev,
1502 			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
1503 			    vid, NCSI_MAX_VLAN_VIDS);
1504 		return -ENOSPC;
1505 	}
1506 
1507 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1508 	if (!vlan)
1509 		return -ENOMEM;
1510 
1511 	vlan->proto = proto;
1512 	vlan->vid = vid;
1513 	list_add_rcu(&vlan->list, &ndp->vlan_vids);
1514 
1515 	netdev_printk(KERN_DEBUG, dev, "NCSI: Added new vid %u\n", vid);
1516 
1517 	found = ncsi_kick_channels(ndp) != 0;
1518 
1519 	return found ? ncsi_process_next_channel(ndp) : 0;
1520 }
1521 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1522 
1523 int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1524 {
1525 	struct vlan_vid *vlan, *tmp;
1526 	struct ncsi_dev_priv *ndp;
1527 	struct ncsi_dev *nd;
1528 	bool found = false;
1529 
1530 	if (vid == 0)
1531 		return 0;
1532 
1533 	nd = ncsi_find_dev(dev);
1534 	if (!nd) {
1535 		netdev_warn(dev, "NCSI: no net_device?\n");
1536 		return 0;
1537 	}
1538 
1539 	ndp = TO_NCSI_DEV_PRIV(nd);
1540 
1541 	/* Remove the VLAN id from our internal list */
1542 	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1543 		if (vlan->vid == vid) {
1544 			netdev_printk(KERN_DEBUG, dev,
1545 				      "NCSI: vid %u found, removing\n", vid);
1546 			list_del_rcu(&vlan->list);
1547 			found = true;
1548 			kfree(vlan);
1549 		}
1550 
1551 	if (!found) {
1552 		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
1553 		return -EINVAL;
1554 	}
1555 
1556 	found = ncsi_kick_channels(ndp) != 0;
1557 
1558 	return found ? ncsi_process_next_channel(ndp) : 0;
1559 }
1560 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
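
/* Wiring sketch (illustrative): an NCSI-capable MAC driver can hook the two
 * exported helpers above straight into its net_device_ops; the struct name
 * here is hypothetical.
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		// ...
 *		.ndo_vlan_rx_add_vid	= ncsi_vlan_rx_add_vid,
 *		.ndo_vlan_rx_kill_vid	= ncsi_vlan_rx_kill_vid,
 *	};
 */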
1561 
1562 struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1563 				   void (*handler)(struct ncsi_dev *ndev))
1564 {
1565 	struct ncsi_dev_priv *ndp;
1566 	struct ncsi_dev *nd;
1567 	unsigned long flags;
1568 	int i;
1569 
1570 	/* Check if the device has been registered or not */
1571 	nd = ncsi_find_dev(dev);
1572 	if (nd)
1573 		return nd;
1574 
1575 	/* Create NCSI device */
1576 	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1577 	if (!ndp)
1578 		return NULL;
1579 
1580 	nd = &ndp->ndev;
1581 	nd->state = ncsi_dev_state_registered;
1582 	nd->dev = dev;
1583 	nd->handler = handler;
1584 	ndp->pending_req_num = 0;
1585 	INIT_LIST_HEAD(&ndp->channel_queue);
1586 	INIT_LIST_HEAD(&ndp->vlan_vids);
1587 	INIT_WORK(&ndp->work, ncsi_dev_work);
1588 
1589 	/* Initialize private NCSI device */
1590 	spin_lock_init(&ndp->lock);
1591 	INIT_LIST_HEAD(&ndp->packages);
1592 	ndp->request_id = NCSI_REQ_START_IDX;
1593 	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1594 		ndp->requests[i].id = i;
1595 		ndp->requests[i].ndp = ndp;
1596 		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
1597 	}
1598 
1599 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1600 #if IS_ENABLED(CONFIG_IPV6)
1601 	ndp->inet6_addr_num = 0;
1602 	if (list_empty(&ncsi_dev_list))
1603 		register_inet6addr_notifier(&ncsi_inet6addr_notifier);
1604 #endif
1605 	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1606 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1607 
1608 	/* Register NCSI packet Rx handler */
1609 	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1610 	ndp->ptype.func = ncsi_rcv_rsp;
1611 	ndp->ptype.dev = dev;
1612 	dev_add_pack(&ndp->ptype);
1613 
1614 	/* Set up generic netlink interface */
1615 	ncsi_init_netlink(dev);
1616 
1617 	return nd;
1618 }
1619 EXPORT_SYMBOL_GPL(ncsi_register_dev);
1620 
1621 int ncsi_start_dev(struct ncsi_dev *nd)
1622 {
1623 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1624 	int ret;
1625 
1626 	if (nd->state != ncsi_dev_state_registered &&
1627 	    nd->state != ncsi_dev_state_functional)
1628 		return -ENOTTY;
1629 
1630 	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1631 		nd->state = ncsi_dev_state_probe;
1632 		schedule_work(&ndp->work);
1633 		return 0;
1634 	}
1635 
1636 	if (ndp->flags & NCSI_DEV_HWA) {
1637 		netdev_info(ndp->ndev.dev, "NCSI: Enabling HWA mode\n");
1638 		ret = ncsi_enable_hwa(ndp);
1639 	} else {
1640 		ret = ncsi_choose_active_channel(ndp);
1641 	}
1642 
1643 	return ret;
1644 }
1645 EXPORT_SYMBOL_GPL(ncsi_start_dev);
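
/* Consumer sketch (illustrative; names prefixed "example_" are hypothetical):
 * a MAC driver typically registers once at probe time, starts NCSI from its
 * open path and stops/unregisters it on the way down.
 *
 *	static void example_ncsi_handler(struct ncsi_dev *nd)
 *	{
 *		if (nd->state == ncsi_dev_state_functional && nd->link_up)
 *			netdev_info(nd->dev, "NCSI link is up\n");
 *	}
 *
 *	// probe:     nd = ncsi_register_dev(netdev, example_ncsi_handler);
 *	// ndo_open:  ret = ncsi_start_dev(nd);
 *	// ndo_stop:  ncsi_stop_dev(nd);
 *	// remove:    ncsi_unregister_dev(nd);
 */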
1646 
1647 void ncsi_stop_dev(struct ncsi_dev *nd)
1648 {
1649 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1650 	struct ncsi_package *np;
1651 	struct ncsi_channel *nc;
1652 	bool chained;
1653 	int old_state;
1654 	unsigned long flags;
1655 
1656 	/* Stop the channel monitors and reset each channel's state */
1657 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1658 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1659 			ncsi_stop_channel_monitor(nc);
1660 
1661 			spin_lock_irqsave(&nc->lock, flags);
1662 			chained = !list_empty(&nc->link);
1663 			old_state = nc->state;
1664 			nc->state = NCSI_CHANNEL_INACTIVE;
1665 			spin_unlock_irqrestore(&nc->lock, flags);
1666 
1667 			WARN_ON_ONCE(chained ||
1668 				     old_state == NCSI_CHANNEL_INVISIBLE);
1669 		}
1670 	}
1671 
1672 	netdev_printk(KERN_DEBUG, ndp->ndev.dev, "NCSI: Stopping device\n");
1673 	ncsi_report_link(ndp, true);
1674 }
1675 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1676 
1677 void ncsi_unregister_dev(struct ncsi_dev *nd)
1678 {
1679 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1680 	struct ncsi_package *np, *tmp;
1681 	unsigned long flags;
1682 
1683 	dev_remove_pack(&ndp->ptype);
1684 
1685 	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1686 		ncsi_remove_package(np);
1687 
1688 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1689 	list_del_rcu(&ndp->node);
1690 #if IS_ENABLED(CONFIG_IPV6)
1691 	if (list_empty(&ncsi_dev_list))
1692 		unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
1693 #endif
1694 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1695 
1696 	ncsi_unregister_netlink(nd->dev);
1697 
1698 	kfree(ndp);
1699 }
1700 EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
1701