xref: /openbmc/linux/net/ncsi/ncsi-manage.c (revision 9efac679)
/*
 * Copyright Gavin Shan, IBM Corporation 2016.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>

#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>

#include "internal.h"
#include "ncsi-pkt.h"

LIST_HEAD(ncsi_dev_list);
DEFINE_SPINLOCK(ncsi_dev_lock);

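/* Size in bytes of a single entry in each filter table: 2 bytes for a
 * VLAN ID, 6 bytes for a unicast, multicast or mixed MAC address.
 */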
static inline int ncsi_filter_size(int table)
{
	int sizes[] = { 2, 6, 6, 6 };

	BUILD_BUG_ON(ARRAY_SIZE(sizes) != NCSI_FILTER_MAX);
	if (table < NCSI_FILTER_BASE || table >= NCSI_FILTER_MAX)
		return -EINVAL;

	return sizes[table];
}

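/* Look up @data in the specified filter table of channel @nc. Returns the
 * index of the matching entry, -ENXIO if the table hasn't been allocated,
 * or -ENOENT if no entry matches.
 */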
int ncsi_find_filter(struct ncsi_channel *nc, int table, void *data)
{
	struct ncsi_channel_filter *ncf;
	void *bitmap;
	int index, size;
	unsigned long flags;

	ncf = nc->filters[table];
	if (!ncf)
		return -ENXIO;

	size = ncsi_filter_size(table);
	if (size < 0)
		return size;

	spin_lock_irqsave(&nc->lock, flags);
	bitmap = (void *)&ncf->bitmap;
	index = -1;
	while ((index = find_next_bit(bitmap, ncf->total, index + 1))
	       < ncf->total) {
		if (!memcmp(ncf->data + size * index, data, size)) {
			spin_unlock_irqrestore(&nc->lock, flags);
			return index;
		}
	}
	spin_unlock_irqrestore(&nc->lock, flags);

	return -ENOENT;
}

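/* Add @data to the specified filter table. If the entry already exists,
 * its index is returned; otherwise a free slot is claimed from the bitmap
 * and the data is copied in. Returns -ENOSPC when the table is full.
 */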
int ncsi_add_filter(struct ncsi_channel *nc, int table, void *data)
{
	struct ncsi_channel_filter *ncf;
	int index, size;
	void *bitmap;
	unsigned long flags;

	size = ncsi_filter_size(table);
	if (size < 0)
		return size;

	index = ncsi_find_filter(nc, table, data);
	if (index >= 0)
		return index;

	ncf = nc->filters[table];
	if (!ncf)
		return -ENODEV;

	spin_lock_irqsave(&nc->lock, flags);
	bitmap = (void *)&ncf->bitmap;
	do {
		index = find_next_zero_bit(bitmap, ncf->total, 0);
		if (index >= ncf->total) {
			spin_unlock_irqrestore(&nc->lock, flags);
			return -ENOSPC;
		}
	} while (test_and_set_bit(index, bitmap));

	memcpy(ncf->data + size * index, data, size);
	spin_unlock_irqrestore(&nc->lock, flags);

	return index;
}

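/* Clear the entry at @index in the specified filter table, if it is set. */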
int ncsi_remove_filter(struct ncsi_channel *nc, int table, int index)
{
	struct ncsi_channel_filter *ncf;
	int size;
	void *bitmap;
	unsigned long flags;

	size = ncsi_filter_size(table);
	if (size < 0)
		return size;

	ncf = nc->filters[table];
	if (!ncf || index >= ncf->total)
		return -ENODEV;

	spin_lock_irqsave(&nc->lock, flags);
	bitmap = (void *)&ncf->bitmap;
	if (test_and_clear_bit(index, bitmap))
		memset(ncf->data + size * index, 0, size);
	spin_unlock_irqrestore(&nc->lock, flags);

	return 0;
}

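/* Recompute the aggregated link state across all active channels and report
 * it to the consumer through the registered handler. With @force_down set,
 * the link is reported as down regardless of the per-channel state.
 */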
static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;

	nd->state = ncsi_dev_state_functional;
	if (force_down) {
		nd->link_up = 0;
		goto report;
	}

	nd->link_up = 0;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_ACTIVE)
				continue;

			if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
				nd->link_up = 1;
				goto report;
			}
		}
	}

report:
	nd->handler(nd);
}

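/* Timer callback that polls the health of a channel. A Get Link Status (GLS)
 * command is sent on every other expiry; once the retry counter reaches its
 * limit the channel is assumed dead, the link is reported down (unless
 * hardware arbitration is in use) and the channel is queued for
 * reconfiguration.
 */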
static void ncsi_channel_monitor(unsigned long data)
{
	struct ncsi_channel *nc = (struct ncsi_channel *)data;
	struct ncsi_package *np = nc->package;
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_cmd_arg nca;
	bool enabled;
	unsigned int timeout;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nc->lock, flags);
	timeout = nc->timeout;
	enabled = nc->enabled;
	spin_unlock_irqrestore(&nc->lock, flags);

	if (!enabled || !list_empty(&nc->link))
		return;
	if (nc->state != NCSI_CHANNEL_INACTIVE &&
	    nc->state != NCSI_CHANNEL_ACTIVE)
		return;

	if (!(timeout % 2)) {
		nca.ndp = ndp;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.type = NCSI_PKT_CMD_GLS;
		nca.driven = false;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
				   ret);
			return;
		}
	}

	if (timeout + 1 >= 3) {
		if (!(ndp->flags & NCSI_DEV_HWA) &&
		    nc->state == NCSI_CHANNEL_ACTIVE)
			ncsi_report_link(ndp, true);

		spin_lock_irqsave(&ndp->lock, flags);
		xchg(&nc->state, NCSI_CHANNEL_INACTIVE);
		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
		ncsi_process_next_channel(ndp);
		return;
	}

	spin_lock_irqsave(&nc->lock, flags);
	nc->timeout = timeout + 1;
	nc->enabled = true;
	spin_unlock_irqrestore(&nc->lock, flags);
	mod_timer(&nc->timer, jiffies + HZ * (1 << (nc->timeout / 2)));
}

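/* Arm the periodic channel monitor. The poll interval backs off as the
 * retry counter grows (HZ << (timeout / 2)).
 */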
void ncsi_start_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	WARN_ON_ONCE(nc->enabled);
	nc->timeout = 0;
	nc->enabled = true;
	spin_unlock_irqrestore(&nc->lock, flags);

	mod_timer(&nc->timer, jiffies + HZ * (1 << (nc->timeout / 2)));
}

void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	if (!nc->enabled) {
		spin_unlock_irqrestore(&nc->lock, flags);
		return;
	}
	nc->enabled = false;
	spin_unlock_irqrestore(&nc->lock, flags);

	del_timer_sync(&nc->timer);
}

struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
				       unsigned char id)
{
	struct ncsi_channel *nc;

	NCSI_FOR_EACH_CHANNEL(np, nc) {
		if (nc->id == id)
			return nc;
	}

	return NULL;
}

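/* Allocate and register a channel with the given ID under package @np. The
 * allocation is done outside the package lock; if another context registered
 * the same channel in the meantime, the duplicate is freed and the existing
 * channel is returned.
 */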
struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
{
	struct ncsi_channel *nc, *tmp;
	int index;
	unsigned long flags;

	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
	if (!nc)
		return NULL;

	nc->id = id;
	nc->package = np;
	nc->state = NCSI_CHANNEL_INACTIVE;
	nc->enabled = false;
	setup_timer(&nc->timer, ncsi_channel_monitor, (unsigned long)nc);
	spin_lock_init(&nc->lock);
	INIT_LIST_HEAD(&nc->link);
	for (index = 0; index < NCSI_CAP_MAX; index++)
		nc->caps[index].index = index;
	for (index = 0; index < NCSI_MODE_MAX; index++)
		nc->modes[index].index = index;

	spin_lock_irqsave(&np->lock, flags);
	tmp = ncsi_find_channel(np, id);
	if (tmp) {
		spin_unlock_irqrestore(&np->lock, flags);
		kfree(nc);
		return tmp;
	}

	list_add_tail_rcu(&nc->node, &np->channels);
	np->channel_num++;
	spin_unlock_irqrestore(&np->lock, flags);

	return nc;
}

static void ncsi_remove_channel(struct ncsi_channel *nc)
{
	struct ncsi_package *np = nc->package;
	struct ncsi_channel_filter *ncf;
	unsigned long flags;
	int i;

	/* Release filters */
	spin_lock_irqsave(&nc->lock, flags);
	for (i = 0; i < NCSI_FILTER_MAX; i++) {
		ncf = nc->filters[i];
		if (!ncf)
			continue;

		nc->filters[i] = NULL;
		kfree(ncf);
	}

	nc->state = NCSI_CHANNEL_INACTIVE;
	spin_unlock_irqrestore(&nc->lock, flags);
	ncsi_stop_channel_monitor(nc);

	/* Remove and free channel */
	spin_lock_irqsave(&np->lock, flags);
	list_del_rcu(&nc->node);
	np->channel_num--;
	spin_unlock_irqrestore(&np->lock, flags);

	kfree(nc);
}

struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
				       unsigned char id)
{
	struct ncsi_package *np;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (np->id == id)
			return np;
	}

	return NULL;
}

struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
				      unsigned char id)
{
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	np = kzalloc(sizeof(*np), GFP_ATOMIC);
	if (!np)
		return NULL;

	np->id = id;
	np->ndp = ndp;
	spin_lock_init(&np->lock);
	INIT_LIST_HEAD(&np->channels);

	spin_lock_irqsave(&ndp->lock, flags);
	tmp = ncsi_find_package(ndp, id);
	if (tmp) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		kfree(np);
		return tmp;
	}

	list_add_tail_rcu(&np->node, &ndp->packages);
	ndp->package_num++;
	spin_unlock_irqrestore(&ndp->lock, flags);

	return np;
}

void ncsi_remove_package(struct ncsi_package *np)
{
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel *nc, *tmp;
	unsigned long flags;

	/* Release all child channels */
	list_for_each_entry_safe(nc, tmp, &np->channels, node)
		ncsi_remove_channel(nc);

	/* Remove and free package */
	spin_lock_irqsave(&ndp->lock, flags);
	list_del_rcu(&np->node);
	ndp->package_num--;
	spin_unlock_irqrestore(&ndp->lock, flags);

	kfree(np);
}

void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
				   unsigned char id,
				   struct ncsi_package **np,
				   struct ncsi_channel **nc)
{
	struct ncsi_package *p;
	struct ncsi_channel *c;

	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;

	if (np)
		*np = p;
	if (nc)
		*nc = c;
}

/* Two consecutive NCSI commands shouldn't use the same packet ID;
 * otherwise a stale response could be matched to the wrong command.
 * The available IDs are therefore handed out in round-robin fashion.
 */
struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp, bool driven)
{
	struct ncsi_request *nr = NULL;
	int i, limit = ARRAY_SIZE(ndp->requests);
	unsigned long flags;

	/* Check for an available request slot from the cursor up to the
	 * ceiling.
	 */
	spin_lock_irqsave(&ndp->lock, flags);
	for (i = ndp->request_id; !nr && i < limit; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->driven = driven;
		if (++ndp->request_id >= limit)
			ndp->request_id = 0;
	}

	/* Fall back to scanning from the start of the table */
	for (i = 0; !nr && i < ndp->request_id; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->driven = driven;
		if (++ndp->request_id >= limit)
			ndp->request_id = 0;
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	return nr;
}

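/* Return a request slot to the pool and release its command and response
 * buffers. For driver-driven requests, the pending counter is decremented
 * and the state machine is kicked once it reaches zero.
 */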
void ncsi_free_request(struct ncsi_request *nr)
{
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct sk_buff *cmd, *rsp;
	unsigned long flags;
	bool driven;

	if (nr->enabled) {
		nr->enabled = false;
		del_timer_sync(&nr->timer);
	}

	spin_lock_irqsave(&ndp->lock, flags);
	cmd = nr->cmd;
	rsp = nr->rsp;
	nr->cmd = NULL;
	nr->rsp = NULL;
	nr->used = false;
	driven = nr->driven;
	spin_unlock_irqrestore(&ndp->lock, flags);

	if (driven && cmd && --ndp->pending_req_num == 0)
		schedule_work(&ndp->work);

	/* Release command and response */
	consume_skb(cmd);
	consume_skb(rsp);
}

struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
{
	struct ncsi_dev_priv *ndp;

	NCSI_FOR_EACH_DEV(ndp) {
		if (ndp->ndev.dev == dev)
			return &ndp->ndev;
	}

	return NULL;
}

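/* Request expiry handler: if no response was received in time, release the
 * request so its ID can be reused.
 */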
static void ncsi_request_timeout(unsigned long data)
{
	struct ncsi_request *nr = (struct ncsi_request *)data;
	struct ncsi_dev_priv *ndp = nr->ndp;
	unsigned long flags;

	/* If the request already has an associated response,
	 * let the response handler release it.
	 */
	spin_lock_irqsave(&ndp->lock, flags);
	nr->enabled = false;
	if (nr->rsp || !nr->cmd) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* Release the request */
	ncsi_free_request(nr);
}

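/* Suspend state machine for the active channel: select its package (SP),
 * disable channel network TX (DCNT), disable the channel (DC) and finally
 * deselect the package (DP), then mark the channel inactive and move on to
 * the next queued channel.
 */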
static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np = ndp->active_package;
	struct ncsi_channel *nc = ndp->active_channel;
	struct ncsi_cmd_arg nca;
	int ret;

	nca.ndp = ndp;
	nca.driven = true;
	switch (nd->state) {
	case ncsi_dev_state_suspend:
		nd->state = ncsi_dev_state_suspend_select;
		/* Fall through */
	case ncsi_dev_state_suspend_select:
	case ncsi_dev_state_suspend_dcnt:
	case ncsi_dev_state_suspend_dc:
	case ncsi_dev_state_suspend_deselect:
		ndp->pending_req_num = 1;

		np = ndp->active_package;
		nc = ndp->active_channel;
		nca.package = np->id;
		if (nd->state == ncsi_dev_state_suspend_select) {
			nca.type = NCSI_PKT_CMD_SP;
			nca.channel = 0x1f;
			if (ndp->flags & NCSI_DEV_HWA)
				nca.bytes[0] = 0;
			else
				nca.bytes[0] = 1;
			nd->state = ncsi_dev_state_suspend_dcnt;
		} else if (nd->state == ncsi_dev_state_suspend_dcnt) {
			nca.type = NCSI_PKT_CMD_DCNT;
			nca.channel = nc->id;
			nd->state = ncsi_dev_state_suspend_dc;
		} else if (nd->state == ncsi_dev_state_suspend_dc) {
			nca.type = NCSI_PKT_CMD_DC;
			nca.channel = nc->id;
			nca.bytes[0] = 1;
			nd->state = ncsi_dev_state_suspend_deselect;
		} else if (nd->state == ncsi_dev_state_suspend_deselect) {
			nca.type = NCSI_PKT_CMD_DP;
			nca.channel = 0x1f;
			nd->state = ncsi_dev_state_suspend_done;
		}

		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			nd->state = ncsi_dev_state_functional;
			return;
		}

		break;
	case ncsi_dev_state_suspend_done:
		xchg(&nc->state, NCSI_CHANNEL_INACTIVE);
		ncsi_process_next_channel(ndp);

		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
			    nd->state);
	}
}

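/* Configuration state machine for the chosen channel: select the package
 * (SP), clear the channel's initial state (CIS), program the MAC address
 * (SMA), enable broadcast and (optionally) global multicast filtering
 * (EBF/EGMF), enable channel network TX (ECNT), enable the channel (EC),
 * enable AEN delivery if supported (AE) and finish with a Get Link Status
 * (GLS) before activating the channel.
 */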
static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct net_device *dev = nd->dev;
	struct ncsi_package *np = ndp->active_package;
	struct ncsi_channel *nc = ndp->active_channel;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	int ret;

	nca.ndp = ndp;
	nca.driven = true;
	switch (nd->state) {
	case ncsi_dev_state_config:
	case ncsi_dev_state_config_sp:
		ndp->pending_req_num = 1;

		/* Select the specific package */
		nca.type = NCSI_PKT_CMD_SP;
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;
		nca.package = np->id;
		nca.channel = 0x1f;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_config_cis;
		break;
	case ncsi_dev_state_config_cis:
		ndp->pending_req_num = 1;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = np->id;
		nca.channel = nc->id;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_config_sma;
		break;
	case ncsi_dev_state_config_sma:
	case ncsi_dev_state_config_ebf:
#if IS_ENABLED(CONFIG_IPV6)
	case ncsi_dev_state_config_egmf:
#endif
	case ncsi_dev_state_config_ecnt:
	case ncsi_dev_state_config_ec:
	case ncsi_dev_state_config_ae:
	case ncsi_dev_state_config_gls:
		ndp->pending_req_num = 1;

		nca.package = np->id;
		nca.channel = nc->id;

		/* Use the first entry of the unicast MAC filter table. Note
		 * that the MAC filter table starts from entry 1 instead of 0.
		 */
		if (nd->state == ncsi_dev_state_config_sma) {
			nca.type = NCSI_PKT_CMD_SMA;
			for (index = 0; index < 6; index++)
				nca.bytes[index] = dev->dev_addr[index];
			nca.bytes[6] = 0x1;
			nca.bytes[7] = 0x1;
			nd->state = ncsi_dev_state_config_ebf;
		} else if (nd->state == ncsi_dev_state_config_ebf) {
			nca.type = NCSI_PKT_CMD_EBF;
			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
			nd->state = ncsi_dev_state_config_ecnt;
#if IS_ENABLED(CONFIG_IPV6)
			if (ndp->inet6_addr_num > 0 &&
			    (nc->caps[NCSI_CAP_GENERIC].cap &
			     NCSI_CAP_GENERIC_MC))
				nd->state = ncsi_dev_state_config_egmf;
			else
				nd->state = ncsi_dev_state_config_ecnt;
		} else if (nd->state == ncsi_dev_state_config_egmf) {
			nca.type = NCSI_PKT_CMD_EGMF;
			nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
			nd->state = ncsi_dev_state_config_ecnt;
#endif /* CONFIG_IPV6 */
		} else if (nd->state == ncsi_dev_state_config_ecnt) {
			nca.type = NCSI_PKT_CMD_ECNT;
			nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_ec) {
			/* Enable AEN if it's supported */
			nca.type = NCSI_PKT_CMD_EC;
			nd->state = ncsi_dev_state_config_ae;
			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
				nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_ae) {
			nca.type = NCSI_PKT_CMD_AE;
			nca.bytes[0] = 0;
			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
			nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_gls) {
			nca.type = NCSI_PKT_CMD_GLS;
			nd->state = ncsi_dev_state_config_done;
		}

		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;
		break;
	case ncsi_dev_state_config_done:
		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1)
			xchg(&nc->state, NCSI_CHANNEL_ACTIVE);
		else
			xchg(&nc->state, NCSI_CHANNEL_INACTIVE);

		ncsi_start_channel_monitor(nc);
		ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_warn(dev, "Wrong NCSI state 0x%x in config\n",
			    nd->state);
	}

	return;

error:
	ncsi_report_link(ndp, true);
}

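/* Pick the next channel to bring up: prefer an inactive channel that already
 * reports link up; otherwise fall back to the first inactive channel found.
 */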
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc, *found;
	struct ncsi_channel_mode *ncm;
	unsigned long flags;

	/* Stop searching as soon as an inactive channel with link up
	 * is found.
	 */
	found = NULL;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_INACTIVE)
				continue;

			if (!found)
				found = nc;

			ncm = &nc->modes[NCSI_MODE_LINK];
			if (ncm->data[2] & 0x1) {
				found = nc;
				goto out;
			}
		}
	}

	if (!found) {
		ncsi_report_link(ndp, true);
		return -ENODEV;
	}

out:
	spin_lock_irqsave(&ndp->lock, flags);
	list_add_tail_rcu(&found->link, &ndp->channel_queue);
	spin_unlock_irqrestore(&ndp->lock, flags);

	return ncsi_process_next_channel(ndp);
}

static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned int cap;

	/* Hardware arbitration is disabled unless every channel
	 * explicitly advertises support for it.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			cap = nc->caps[NCSI_CAP_GENERIC].cap;
			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
				ndp->flags &= ~NCSI_DEV_HWA;
				return false;
			}
		}
	}

	ndp->flags |= NCSI_DEV_HWA;
	return true;
}

static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	/* Move all available channels to the processing queue */
	spin_lock_irqsave(&ndp->lock, flags);
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
				     !list_empty(&nc->link));
			ncsi_stop_channel_monitor(nc);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
		}
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* In the extreme case we may have no channels at all */
	if (list_empty(&ndp->channel_queue)) {
		ncsi_report_link(ndp, false);
		return -ENOENT;
	}

	return ncsi_process_next_channel(ndp);
}

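/* Probe state machine: deselect all packages, then select every possible
 * package in turn to discover which ones respond, enumerate each package's
 * channels (Clear Initial State, Get Version ID, Get Capabilities, Get Link
 * Status) and finally deselect it. Once every package has been walked, the
 * device is marked as probed and an active channel is chosen.
 */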
static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	int ret;

	nca.ndp = ndp;
	nca.driven = true;
	switch (nd->state) {
	case ncsi_dev_state_probe:
		nd->state = ncsi_dev_state_probe_deselect;
		/* Fall through */
	case ncsi_dev_state_probe_deselect:
		ndp->pending_req_num = 8;

		/* Deselect all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		nca.channel = 0x1f;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_package;
		break;
	case ncsi_dev_state_probe_package:
		ndp->pending_req_num = 16;

		/* Select all possible packages */
		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.channel = 0x1f;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		/* Disable all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_channel;
		break;
	case ncsi_dev_state_probe_channel:
		if (!ndp->active_package)
			ndp->active_package = list_first_or_null_rcu(
				&ndp->packages, struct ncsi_package, node);
		else if (list_is_last(&ndp->active_package->node,
				      &ndp->packages))
			ndp->active_package = NULL;
		else
			ndp->active_package = list_next_entry(
				ndp->active_package, node);

		/* All available packages and channels have been enumerated.
		 * The enumeration only happens once, when the NCSI interface
		 * is started, so we continue bringing the interface up after
		 * the enumeration.
		 *
		 * An active channel must be chosen before it can be
		 * configured. Note that in extreme cases there may be no
		 * usable channel at all.
		 */
		if (!ndp->active_package) {
			ndp->flags |= NCSI_DEV_PROBED;
			if (ncsi_check_hwa(ndp))
				ncsi_enable_hwa(ndp);
			else
				ncsi_choose_active_channel(ndp);
			return;
		}

		/* Select the active package */
		ndp->pending_req_num = 1;
		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.package = ndp->active_package->id;
		nca.channel = 0x1f;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_cis;
		break;
	case ncsi_dev_state_probe_cis:
		ndp->pending_req_num = 32;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = ndp->active_package->id;
		for (index = 0; index < 0x20; index++) {
			nca.channel = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_gvi;
		break;
	case ncsi_dev_state_probe_gvi:
	case ncsi_dev_state_probe_gc:
	case ncsi_dev_state_probe_gls:
		np = ndp->active_package;
		ndp->pending_req_num = np->channel_num;

		/* Retrieve version, capability or link status */
		if (nd->state == ncsi_dev_state_probe_gvi)
			nca.type = NCSI_PKT_CMD_GVI;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nca.type = NCSI_PKT_CMD_GC;
		else
			nca.type = NCSI_PKT_CMD_GLS;

		nca.package = np->id;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			nca.channel = nc->id;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		if (nd->state == ncsi_dev_state_probe_gvi)
			nd->state = ncsi_dev_state_probe_gc;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nd->state = ncsi_dev_state_probe_gls;
		else
			nd->state = ncsi_dev_state_probe_dp;
		break;
	case ncsi_dev_state_probe_dp:
		ndp->pending_req_num = 1;

		/* Deselect the active package */
		nca.type = NCSI_PKT_CMD_DP;
		nca.package = ndp->active_package->id;
		nca.channel = 0x1f;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		/* Scan channels in next package */
		nd->state = ncsi_dev_state_probe_channel;
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in enumeration\n",
			    nd->state);
	}

	return;
error:
	ncsi_report_link(ndp, true);
}

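/* Workqueue handler that dispatches to the state machine matching the
 * current major state (probe, suspend or config).
 */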
static void ncsi_dev_work(struct work_struct *work)
{
	struct ncsi_dev_priv *ndp = container_of(work,
			struct ncsi_dev_priv, work);
	struct ncsi_dev *nd = &ndp->ndev;

	switch (nd->state & ncsi_dev_state_major) {
	case ncsi_dev_state_probe:
		ncsi_probe_channel(ndp);
		break;
	case ncsi_dev_state_suspend:
		ncsi_suspend_channel(ndp);
		break;
	case ncsi_dev_state_config:
		ncsi_configure_channel(ndp);
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
			    nd->state);
	}
}

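/* Take the next channel off the processing queue and drive it through the
 * appropriate state machine: configuration for an inactive channel, suspend
 * for an active one. With an empty queue, either reshuffle the channel
 * selection or report the current link state.
 */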
int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc;
	int old_state;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);
	nc = list_first_or_null_rcu(&ndp->channel_queue,
				    struct ncsi_channel, link);
	if (!nc) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		goto out;
	}

	old_state = xchg(&nc->state, NCSI_CHANNEL_INVISIBLE);
	list_del_init(&nc->link);

	spin_unlock_irqrestore(&ndp->lock, flags);

	ndp->active_channel = nc;
	ndp->active_package = nc->package;

	switch (old_state) {
	case NCSI_CHANNEL_INACTIVE:
		ndp->ndev.state = ncsi_dev_state_config;
		ncsi_configure_channel(ndp);
		break;
	case NCSI_CHANNEL_ACTIVE:
		ndp->ndev.state = ncsi_dev_state_suspend;
		ncsi_suspend_channel(ndp);
		break;
	default:
		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
			   nc->state, nc->package->id, nc->id);
		ncsi_report_link(ndp, false);
		return -EINVAL;
	}

	return 0;

out:
	ndp->active_channel = NULL;
	ndp->active_package = NULL;
	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
		return ncsi_choose_active_channel(ndp);
	}

	ncsi_report_link(ndp, false);
	return -ENODEV;
}

#if IS_ENABLED(CONFIG_IPV6)
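/* Track the number of non-link-local, non-loopback IPv6 addresses on the
 * interface and enable or disable the channel's global multicast filter
 * (EGMF/DGMF) accordingly.
 */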
static int ncsi_inet6addr_event(struct notifier_block *this,
				unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *dev = ifa->idev->dev;
	struct ncsi_dev *nd = ncsi_find_dev(dev);
	struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	struct ncsi_cmd_arg nca;
	bool action;
	int ret;

	if (!ndp || (ipv6_addr_type(&ifa->addr) &
	    (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_UP:
		action = (++ndp->inet6_addr_num) == 1;
		nca.type = NCSI_PKT_CMD_EGMF;
		break;
	case NETDEV_DOWN:
		action = (--ndp->inet6_addr_num == 0);
		nca.type = NCSI_PKT_CMD_DGMF;
		break;
	default:
		return NOTIFY_OK;
	}

	/* We might not have an active package or channel yet. The required
	 * IPv6 multicast filter will be enabled once an active channel and
	 * package have been chosen.
	 */
	np = ndp->active_package;
	nc = ndp->active_channel;
	if (!action || !np || !nc)
		return NOTIFY_OK;

	/* There is nothing to enable or disable if the function isn't
	 * supported by the channel.
	 */
	if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
		return NOTIFY_OK;

	nca.ndp = ndp;
	nca.driven = false;
	nca.package = np->id;
	nca.channel = nc->id;
	nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
	ret = ncsi_xmit_cmd(&nca);
	if (ret) {
		netdev_warn(dev, "Failed to %s global multicast filter (%d)\n",
			    (event == NETDEV_UP) ? "enable" : "disable", ret);
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block ncsi_inet6addr_notifier = {
	.notifier_call = ncsi_inet6addr_event,
};
#endif /* CONFIG_IPV6 */

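/* Register the NCSI device against the given network device. A consumer
 * (for example a BMC-side NIC driver) is expected to use this API roughly
 * as sketched below; identifiers other than the ncsi_* and netif_carrier_*
 * calls are hypothetical and error handling is omitted:
 *
 *	static void foo_ncsi_handler(struct ncsi_dev *nd)
 *	{
 *		if (nd->state != ncsi_dev_state_functional)
 *			return;
 *		if (nd->link_up)
 *			netif_carrier_on(nd->dev);
 *		else
 *			netif_carrier_off(nd->dev);
 *	}
 *
 *	ndev = ncsi_register_dev(netdev, foo_ncsi_handler);	(at probe time)
 *	ncsi_start_dev(ndev);					(at open time)
 *	ncsi_unregister_dev(ndev);				(at teardown)
 */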
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
				   void (*handler)(struct ncsi_dev *ndev))
{
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	unsigned long flags;
	int i;

	/* Check if the device has been registered or not */
	nd = ncsi_find_dev(dev);
	if (nd)
		return nd;

	/* Create NCSI device */
	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
	if (!ndp)
		return NULL;

	nd = &ndp->ndev;
	nd->state = ncsi_dev_state_registered;
	nd->dev = dev;
	nd->handler = handler;
	ndp->pending_req_num = 0;
	INIT_LIST_HEAD(&ndp->channel_queue);
	INIT_WORK(&ndp->work, ncsi_dev_work);

	/* Initialize private NCSI device */
	spin_lock_init(&ndp->lock);
	INIT_LIST_HEAD(&ndp->packages);
	ndp->request_id = 0;
	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
		ndp->requests[i].id = i;
		ndp->requests[i].ndp = ndp;
		setup_timer(&ndp->requests[i].timer,
			    ncsi_request_timeout,
			    (unsigned long)&ndp->requests[i]);
	}

	spin_lock_irqsave(&ncsi_dev_lock, flags);
#if IS_ENABLED(CONFIG_IPV6)
	ndp->inet6_addr_num = 0;
	if (list_empty(&ncsi_dev_list))
		register_inet6addr_notifier(&ncsi_inet6addr_notifier);
#endif
	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	/* Register NCSI packet Rx handler */
	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
	ndp->ptype.func = ncsi_rcv_rsp;
	ndp->ptype.dev = dev;
	dev_add_pack(&ndp->ptype);

	return nd;
}
EXPORT_SYMBOL_GPL(ncsi_register_dev);

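/* Kick off the NCSI state machine: run the probe sequence on first start,
 * or reset every channel to inactive and re-run channel selection (or
 * hardware arbitration) on subsequent starts.
 */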
int ncsi_start_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	int old_state, ret;

	if (nd->state != ncsi_dev_state_registered &&
	    nd->state != ncsi_dev_state_functional)
		return -ENOTTY;

	if (!(ndp->flags & NCSI_DEV_PROBED)) {
		nd->state = ncsi_dev_state_probe;
		schedule_work(&ndp->work);
		return 0;
	}

	/* Reset the channel states and start over */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			old_state = xchg(&nc->state, NCSI_CHANNEL_INACTIVE);
			WARN_ON_ONCE(!list_empty(&nc->link) ||
				     old_state == NCSI_CHANNEL_INVISIBLE);
		}
	}

	if (ndp->flags & NCSI_DEV_HWA)
		ret = ncsi_enable_hwa(ndp);
	else
		ret = ncsi_choose_active_channel(ndp);

	return ret;
}
EXPORT_SYMBOL_GPL(ncsi_start_dev);

void ncsi_unregister_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	dev_remove_pack(&ndp->ptype);

	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
		ncsi_remove_package(np);

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_del_rcu(&ndp->node);
#if IS_ENABLED(CONFIG_IPV6)
	if (list_empty(&ncsi_dev_list))
		unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
#endif
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	kfree(ndp);
}
EXPORT_SYMBOL_GPL(ncsi_unregister_dev);