xref: /openbmc/linux/net/ncsi/ncsi-manage.c (revision 360823a09426347ea8f232b0b0b5156d0aed0302)
1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   * Copyright Gavin Shan, IBM Corporation 2016.
4   */
5  
6  #include <linux/module.h>
7  #include <linux/kernel.h>
8  #include <linux/init.h>
9  #include <linux/netdevice.h>
10  #include <linux/skbuff.h>
11  #include <linux/of.h>
12  #include <linux/platform_device.h>
13  
14  #include <net/ncsi.h>
15  #include <net/net_namespace.h>
16  #include <net/sock.h>
17  #include <net/addrconf.h>
18  #include <net/ipv6.h>
19  #include <net/genetlink.h>
20  
21  #include "internal.h"
22  #include "ncsi-pkt.h"
23  #include "ncsi-netlink.h"
24  
25  LIST_HEAD(ncsi_dev_list);
26  DEFINE_SPINLOCK(ncsi_dev_lock);
27  
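/* Bit 0 of the cached NCSI_MODE_LINK word (data[2]) tracks whether the
 * channel currently reports link up.
 */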
28  bool ncsi_channel_has_link(struct ncsi_channel *channel)
29  {
30  	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
31  }
32  
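/* Return true when no other channel on this device is both ACTIVE and
 * reporting link, i.e. @channel is effectively the last usable channel.
 */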
33  bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
34  			  struct ncsi_channel *channel)
35  {
36  	struct ncsi_package *np;
37  	struct ncsi_channel *nc;
38  
39  	NCSI_FOR_EACH_PACKAGE(ndp, np)
40  		NCSI_FOR_EACH_CHANNEL(np, nc) {
41  			if (nc == channel)
42  				continue;
43  			if (nc->state == NCSI_CHANNEL_ACTIVE &&
44  			    ncsi_channel_has_link(nc))
45  				return false;
46  		}
47  
48  	return true;
49  }
50  
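/* Recompute the aggregate link state and notify the registered handler.
 * Unless @force_down is set, link is reported up as soon as one ACTIVE,
 * unqueued channel has link.
 */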
51  static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
52  {
53  	struct ncsi_dev *nd = &ndp->ndev;
54  	struct ncsi_package *np;
55  	struct ncsi_channel *nc;
56  	unsigned long flags;
57  
58  	nd->state = ncsi_dev_state_functional;
59  	if (force_down) {
60  		nd->link_up = 0;
61  		goto report;
62  	}
63  
64  	nd->link_up = 0;
65  	NCSI_FOR_EACH_PACKAGE(ndp, np) {
66  		NCSI_FOR_EACH_CHANNEL(np, nc) {
67  			spin_lock_irqsave(&nc->lock, flags);
68  
69  			if (!list_empty(&nc->link) ||
70  			    nc->state != NCSI_CHANNEL_ACTIVE) {
71  				spin_unlock_irqrestore(&nc->lock, flags);
72  				continue;
73  			}
74  
75  			if (ncsi_channel_has_link(nc)) {
76  				spin_unlock_irqrestore(&nc->lock, flags);
77  				nd->link_up = 1;
78  				goto report;
79  			}
80  
81  			spin_unlock_irqrestore(&nc->lock, flags);
82  		}
83  	}
84  
85  report:
86  	nd->handler(nd);
87  }
88  
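/* Per-channel monitor timer: on each tick it either sends a Get Link
 * Status command (START/RETRY states) or simply waits; once the state
 * counter passes NCSI_CHANNEL_MONITOR_WAIT_MAX the channel is treated as
 * dead, link is forced down and the channel is queued for reconfiguration
 * with NCSI_DEV_RESHUFFLE set.
 */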
89  static void ncsi_channel_monitor(struct timer_list *t)
90  {
91  	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
92  	struct ncsi_package *np = nc->package;
93  	struct ncsi_dev_priv *ndp = np->ndp;
94  	struct ncsi_channel_mode *ncm;
95  	struct ncsi_cmd_arg nca;
96  	bool enabled, chained;
97  	unsigned int monitor_state;
98  	unsigned long flags;
99  	int state, ret;
100  
101  	spin_lock_irqsave(&nc->lock, flags);
102  	state = nc->state;
103  	chained = !list_empty(&nc->link);
104  	enabled = nc->monitor.enabled;
105  	monitor_state = nc->monitor.state;
106  	spin_unlock_irqrestore(&nc->lock, flags);
107  
108  	if (!enabled)
109  		return;		/* expected race disabling timer */
110  	if (WARN_ON_ONCE(chained))
111  		goto bad_state;
112  
113  	if (state != NCSI_CHANNEL_INACTIVE &&
114  	    state != NCSI_CHANNEL_ACTIVE) {
115  bad_state:
116  		netdev_warn(ndp->ndev.dev,
117  			    "Bad NCSI monitor state channel %d 0x%x %s queue\n",
118  			    nc->id, state, chained ? "on" : "off");
119  		spin_lock_irqsave(&nc->lock, flags);
120  		nc->monitor.enabled = false;
121  		spin_unlock_irqrestore(&nc->lock, flags);
122  		return;
123  	}
124  
125  	switch (monitor_state) {
126  	case NCSI_CHANNEL_MONITOR_START:
127  	case NCSI_CHANNEL_MONITOR_RETRY:
128  		nca.ndp = ndp;
129  		nca.package = np->id;
130  		nca.channel = nc->id;
131  		nca.type = NCSI_PKT_CMD_GLS;
132  		nca.req_flags = 0;
133  		ret = ncsi_xmit_cmd(&nca);
134  		if (ret)
135  			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
136  				   ret);
137  		break;
138  	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
139  		break;
140  	default:
141  		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
142  			   nc->id);
143  		ncsi_report_link(ndp, true);
144  		ndp->flags |= NCSI_DEV_RESHUFFLE;
145  
146  		ncm = &nc->modes[NCSI_MODE_LINK];
147  		spin_lock_irqsave(&nc->lock, flags);
148  		nc->monitor.enabled = false;
149  		nc->state = NCSI_CHANNEL_INVISIBLE;
150  		ncm->data[2] &= ~0x1;
151  		spin_unlock_irqrestore(&nc->lock, flags);
152  
153  		spin_lock_irqsave(&ndp->lock, flags);
154  		nc->state = NCSI_CHANNEL_ACTIVE;
155  		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
156  		spin_unlock_irqrestore(&ndp->lock, flags);
157  		ncsi_process_next_channel(ndp);
158  		return;
159  	}
160  
161  	spin_lock_irqsave(&nc->lock, flags);
162  	nc->monitor.state++;
163  	spin_unlock_irqrestore(&nc->lock, flags);
164  	mod_timer(&nc->monitor.timer, jiffies + HZ);
165  }
166  
167  void ncsi_start_channel_monitor(struct ncsi_channel *nc)
168  {
169  	unsigned long flags;
170  
171  	spin_lock_irqsave(&nc->lock, flags);
172  	WARN_ON_ONCE(nc->monitor.enabled);
173  	nc->monitor.enabled = true;
174  	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
175  	spin_unlock_irqrestore(&nc->lock, flags);
176  
177  	mod_timer(&nc->monitor.timer, jiffies + HZ);
178  }
179  
180  void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
181  {
182  	unsigned long flags;
183  
184  	spin_lock_irqsave(&nc->lock, flags);
185  	if (!nc->monitor.enabled) {
186  		spin_unlock_irqrestore(&nc->lock, flags);
187  		return;
188  	}
189  	nc->monitor.enabled = false;
190  	spin_unlock_irqrestore(&nc->lock, flags);
191  
192  	del_timer_sync(&nc->monitor.timer);
193  }
194  
195  struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
196  				       unsigned char id)
197  {
198  	struct ncsi_channel *nc;
199  
200  	NCSI_FOR_EACH_CHANNEL(np, nc) {
201  		if (nc->id == id)
202  			return nc;
203  	}
204  
205  	return NULL;
206  }
207  
208  struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
209  {
210  	struct ncsi_channel *nc, *tmp;
211  	int index;
212  	unsigned long flags;
213  
214  	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
215  	if (!nc)
216  		return NULL;
217  
218  	nc->id = id;
219  	nc->package = np;
220  	nc->state = NCSI_CHANNEL_INACTIVE;
221  	nc->monitor.enabled = false;
222  	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
223  	spin_lock_init(&nc->lock);
224  	INIT_LIST_HEAD(&nc->link);
225  	for (index = 0; index < NCSI_CAP_MAX; index++)
226  		nc->caps[index].index = index;
227  	for (index = 0; index < NCSI_MODE_MAX; index++)
228  		nc->modes[index].index = index;
229  
230  	spin_lock_irqsave(&np->lock, flags);
231  	tmp = ncsi_find_channel(np, id);
232  	if (tmp) {
233  		spin_unlock_irqrestore(&np->lock, flags);
234  		kfree(nc);
235  		return tmp;
236  	}
237  
238  	list_add_tail_rcu(&nc->node, &np->channels);
239  	np->channel_num++;
240  	spin_unlock_irqrestore(&np->lock, flags);
241  
242  	return nc;
243  }
244  
245  static void ncsi_remove_channel(struct ncsi_channel *nc)
246  {
247  	struct ncsi_package *np = nc->package;
248  	unsigned long flags;
249  
250  	spin_lock_irqsave(&nc->lock, flags);
251  
252  	/* Release filters */
253  	kfree(nc->mac_filter.addrs);
254  	kfree(nc->vlan_filter.vids);
255  
256  	nc->state = NCSI_CHANNEL_INACTIVE;
257  	spin_unlock_irqrestore(&nc->lock, flags);
258  	ncsi_stop_channel_monitor(nc);
259  
260  	/* Remove and free channel */
261  	spin_lock_irqsave(&np->lock, flags);
262  	list_del_rcu(&nc->node);
263  	np->channel_num--;
264  	spin_unlock_irqrestore(&np->lock, flags);
265  
266  	kfree(nc);
267  }
268  
269  struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
270  				       unsigned char id)
271  {
272  	struct ncsi_package *np;
273  
274  	NCSI_FOR_EACH_PACKAGE(ndp, np) {
275  		if (np->id == id)
276  			return np;
277  	}
278  
279  	return NULL;
280  }
281  
282  struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
283  				      unsigned char id)
284  {
285  	struct ncsi_package *np, *tmp;
286  	unsigned long flags;
287  
288  	np = kzalloc(sizeof(*np), GFP_ATOMIC);
289  	if (!np)
290  		return NULL;
291  
292  	np->id = id;
293  	np->ndp = ndp;
294  	spin_lock_init(&np->lock);
295  	INIT_LIST_HEAD(&np->channels);
296  	np->channel_whitelist = UINT_MAX;
297  
298  	spin_lock_irqsave(&ndp->lock, flags);
299  	tmp = ncsi_find_package(ndp, id);
300  	if (tmp) {
301  		spin_unlock_irqrestore(&ndp->lock, flags);
302  		kfree(np);
303  		return tmp;
304  	}
305  
306  	list_add_tail_rcu(&np->node, &ndp->packages);
307  	ndp->package_num++;
308  	spin_unlock_irqrestore(&ndp->lock, flags);
309  
310  	return np;
311  }
312  
313  void ncsi_remove_package(struct ncsi_package *np)
314  {
315  	struct ncsi_dev_priv *ndp = np->ndp;
316  	struct ncsi_channel *nc, *tmp;
317  	unsigned long flags;
318  
319  	/* Release all child channels */
320  	list_for_each_entry_safe(nc, tmp, &np->channels, node)
321  		ncsi_remove_channel(nc);
322  
323  	/* Remove and free package */
324  	spin_lock_irqsave(&ndp->lock, flags);
325  	list_del_rcu(&np->node);
326  	ndp->package_num--;
327  	spin_unlock_irqrestore(&ndp->lock, flags);
328  
329  	kfree(np);
330  }
331  
332  void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
333  				   unsigned char id,
334  				   struct ncsi_package **np,
335  				   struct ncsi_channel **nc)
336  {
337  	struct ncsi_package *p;
338  	struct ncsi_channel *c;
339  
340  	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
341  	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
342  
343  	if (np)
344  		*np = p;
345  	if (nc)
346  		*nc = c;
347  }
348  
349  /* Two consecutive NCSI commands must not share a packet ID;
350   * otherwise a stale response could be matched against the wrong
351   * command. The available IDs are therefore allocated in round-robin fashion.
352   */
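/* Example: if the cursor has advanced to the end of the table and all
 * higher slots are busy, the second loop below rescans from
 * NCSI_REQ_START_IDX so a previously freed lower ID can be reused.
 */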
353  struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
354  					unsigned int req_flags)
355  {
356  	struct ncsi_request *nr = NULL;
357  	int i, limit = ARRAY_SIZE(ndp->requests);
358  	unsigned long flags;
359  
360  	/* Look for an available request slot from the cursor up to the ceiling */
361  	spin_lock_irqsave(&ndp->lock, flags);
362  	for (i = ndp->request_id; i < limit; i++) {
363  		if (ndp->requests[i].used)
364  			continue;
365  
366  		nr = &ndp->requests[i];
367  		nr->used = true;
368  		nr->flags = req_flags;
369  		ndp->request_id = i + 1;
370  		goto found;
371  	}
372  
373  	/* Fall back to searching from the starting cursor */
374  	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
375  		if (ndp->requests[i].used)
376  			continue;
377  
378  		nr = &ndp->requests[i];
379  		nr->used = true;
380  		nr->flags = req_flags;
381  		ndp->request_id = i + 1;
382  		goto found;
383  	}
384  
385  found:
386  	spin_unlock_irqrestore(&ndp->lock, flags);
387  	return nr;
388  }
389  
390  void ncsi_free_request(struct ncsi_request *nr)
391  {
392  	struct ncsi_dev_priv *ndp = nr->ndp;
393  	struct sk_buff *cmd, *rsp;
394  	unsigned long flags;
395  	bool driven;
396  
397  	if (nr->enabled) {
398  		nr->enabled = false;
399  		del_timer_sync(&nr->timer);
400  	}
401  
402  	spin_lock_irqsave(&ndp->lock, flags);
403  	cmd = nr->cmd;
404  	rsp = nr->rsp;
405  	nr->cmd = NULL;
406  	nr->rsp = NULL;
407  	nr->used = false;
408  	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
409  	spin_unlock_irqrestore(&ndp->lock, flags);
410  
411  	if (driven && cmd && --ndp->pending_req_num == 0)
412  		schedule_work(&ndp->work);
413  
414  	/* Release command and response */
415  	consume_skb(cmd);
416  	consume_skb(rsp);
417  }
418  
419  struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
420  {
421  	struct ncsi_dev_priv *ndp;
422  
423  	NCSI_FOR_EACH_DEV(ndp) {
424  		if (ndp->ndev.dev == dev)
425  			return &ndp->ndev;
426  	}
427  
428  	return NULL;
429  }
430  
431  static void ncsi_request_timeout(struct timer_list *t)
432  {
433  	struct ncsi_request *nr = from_timer(nr, t, timer);
434  	struct ncsi_dev_priv *ndp = nr->ndp;
435  	struct ncsi_cmd_pkt *cmd;
436  	struct ncsi_package *np;
437  	struct ncsi_channel *nc;
438  	unsigned long flags;
439  
440  	/* If the request already has an associated response,
441  	 * let the response handler release it.
442  	 */
443  	spin_lock_irqsave(&ndp->lock, flags);
444  	nr->enabled = false;
445  	if (nr->rsp || !nr->cmd) {
446  		spin_unlock_irqrestore(&ndp->lock, flags);
447  		return;
448  	}
449  	spin_unlock_irqrestore(&ndp->lock, flags);
450  
451  	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
452  		if (nr->cmd) {
453  			/* Find the package */
454  			cmd = (struct ncsi_cmd_pkt *)
455  			      skb_network_header(nr->cmd);
456  			ncsi_find_package_and_channel(ndp,
457  						      cmd->cmd.common.channel,
458  						      &np, &nc);
459  			ncsi_send_netlink_timeout(nr, np, nc);
460  		}
461  	}
462  
463  	/* Release the request */
464  	ncsi_free_request(nr);
465  }
466  
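/* Suspend state machine for the active channel: select its package (SP),
 * optionally refresh link status (GLS) when a reshuffle is pending,
 * disable channel network Tx (DCNT), disable the channel (DC), and
 * finally deselect the package (DP) unless another channel on the same
 * package is still active.
 */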
467  static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
468  {
469  	struct ncsi_dev *nd = &ndp->ndev;
470  	struct ncsi_package *np;
471  	struct ncsi_channel *nc, *tmp;
472  	struct ncsi_cmd_arg nca;
473  	unsigned long flags;
474  	int ret;
475  
476  	np = ndp->active_package;
477  	nc = ndp->active_channel;
478  	nca.ndp = ndp;
479  	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
480  	switch (nd->state) {
481  	case ncsi_dev_state_suspend:
482  		nd->state = ncsi_dev_state_suspend_select;
483  		fallthrough;
484  	case ncsi_dev_state_suspend_select:
485  		ndp->pending_req_num = 1;
486  
487  		nca.type = NCSI_PKT_CMD_SP;
488  		nca.package = np->id;
489  		nca.channel = NCSI_RESERVED_CHANNEL;
490  		if (ndp->flags & NCSI_DEV_HWA)
491  			nca.bytes[0] = 0;
492  		else
493  			nca.bytes[0] = 1;
494  
495  		/* Retrieve the latest link states of the channels in the
496  		 * current package when the active channel has to fail over
497  		 * to another one, since we may select a different channel
498  		 * as the next active one and link state is the most
499  		 * important factor in that selection. Accurate link states
500  		 * are needed here because the link states of inactive
501  		 * channels can't be refreshed in time via LSC AENs.
502  		 */
503  		if (ndp->flags & NCSI_DEV_RESHUFFLE)
504  			nd->state = ncsi_dev_state_suspend_gls;
505  		else
506  			nd->state = ncsi_dev_state_suspend_dcnt;
507  		ret = ncsi_xmit_cmd(&nca);
508  		if (ret)
509  			goto error;
510  
511  		break;
512  	case ncsi_dev_state_suspend_gls:
513  		ndp->pending_req_num = 1;
514  
515  		nca.type = NCSI_PKT_CMD_GLS;
516  		nca.package = np->id;
517  		nca.channel = ndp->channel_probe_id;
518  		ret = ncsi_xmit_cmd(&nca);
519  		if (ret)
520  			goto error;
521  		ndp->channel_probe_id++;
522  
523  		if (ndp->channel_probe_id == ndp->channel_count) {
524  			ndp->channel_probe_id = 0;
525  			nd->state = ncsi_dev_state_suspend_dcnt;
526  		}
527  
528  		break;
529  	case ncsi_dev_state_suspend_dcnt:
530  		ndp->pending_req_num = 1;
531  
532  		nca.type = NCSI_PKT_CMD_DCNT;
533  		nca.package = np->id;
534  		nca.channel = nc->id;
535  
536  		nd->state = ncsi_dev_state_suspend_dc;
537  		ret = ncsi_xmit_cmd(&nca);
538  		if (ret)
539  			goto error;
540  
541  		break;
542  	case ncsi_dev_state_suspend_dc:
543  		ndp->pending_req_num = 1;
544  
545  		nca.type = NCSI_PKT_CMD_DC;
546  		nca.package = np->id;
547  		nca.channel = nc->id;
548  		nca.bytes[0] = 1;
549  
550  		nd->state = ncsi_dev_state_suspend_deselect;
551  		ret = ncsi_xmit_cmd(&nca);
552  		if (ret)
553  			goto error;
554  
555  		NCSI_FOR_EACH_CHANNEL(np, tmp) {
556  			/* If there is another channel active on this package
557  			 * do not deselect the package.
558  			 */
559  			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
560  				nd->state = ncsi_dev_state_suspend_done;
561  				break;
562  			}
563  		}
564  		break;
565  	case ncsi_dev_state_suspend_deselect:
566  		ndp->pending_req_num = 1;
567  
568  		nca.type = NCSI_PKT_CMD_DP;
569  		nca.package = np->id;
570  		nca.channel = NCSI_RESERVED_CHANNEL;
571  
572  		nd->state = ncsi_dev_state_suspend_done;
573  		ret = ncsi_xmit_cmd(&nca);
574  		if (ret)
575  			goto error;
576  
577  		break;
578  	case ncsi_dev_state_suspend_done:
579  		spin_lock_irqsave(&nc->lock, flags);
580  		nc->state = NCSI_CHANNEL_INACTIVE;
581  		spin_unlock_irqrestore(&nc->lock, flags);
582  		if (ndp->flags & NCSI_DEV_RESET)
583  			ncsi_reset_dev(nd);
584  		else
585  			ncsi_process_next_channel(ndp);
586  		break;
587  	default:
588  		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
589  			    nd->state);
590  	}
591  
592  	return;
593  error:
594  	nd->state = ncsi_dev_state_functional;
595  }
596  
597  /* Check the VLAN filter bitmap for a set filter, and construct a
598   * "Set VLAN Filter - Disable" packet if found.
599   */
600  static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
601  			 struct ncsi_cmd_arg *nca)
602  {
603  	struct ncsi_channel_vlan_filter *ncf;
604  	unsigned long flags;
605  	void *bitmap;
606  	int index;
607  	u16 vid;
608  
609  	ncf = &nc->vlan_filter;
610  	bitmap = &ncf->bitmap;
611  
612  	spin_lock_irqsave(&nc->lock, flags);
613  	index = find_first_bit(bitmap, ncf->n_vids);
614  	if (index >= ncf->n_vids) {
615  		spin_unlock_irqrestore(&nc->lock, flags);
616  		return -1;
617  	}
618  	vid = ncf->vids[index];
619  
620  	clear_bit(index, bitmap);
621  	ncf->vids[index] = 0;
622  	spin_unlock_irqrestore(&nc->lock, flags);
623  
624  	nca->type = NCSI_PKT_CMD_SVF;
625  	nca->words[1] = vid;
626  	/* HW filter index starts at 1 */
627  	nca->bytes[6] = index + 1;
628  	nca->bytes[7] = 0x00;
629  	return 0;
630  }
631  
632  /* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
633   * packet.
634   */
635  static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
636  		       struct ncsi_cmd_arg *nca)
637  {
638  	struct ncsi_channel_vlan_filter *ncf;
639  	struct vlan_vid *vlan = NULL;
640  	unsigned long flags;
641  	int i, index;
642  	void *bitmap;
643  	u16 vid;
644  
645  	if (list_empty(&ndp->vlan_vids))
646  		return -1;
647  
648  	ncf = &nc->vlan_filter;
649  	bitmap = &ncf->bitmap;
650  
651  	spin_lock_irqsave(&nc->lock, flags);
652  
653  	rcu_read_lock();
654  	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
655  		vid = vlan->vid;
656  		for (i = 0; i < ncf->n_vids; i++)
657  			if (ncf->vids[i] == vid) {
658  				vid = 0;
659  				break;
660  			}
661  		if (vid)
662  			break;
663  	}
664  	rcu_read_unlock();
665  
666  	if (!vid) {
667  		/* Every requested VLAN ID is already set in the filter */
668  		spin_unlock_irqrestore(&nc->lock, flags);
669  		return -1;
670  	}
671  
672  	index = find_first_zero_bit(bitmap, ncf->n_vids);
673  	if (index < 0 || index >= ncf->n_vids) {
674  		netdev_err(ndp->ndev.dev,
675  			   "Channel %u already has all VLAN filters set\n",
676  			   nc->id);
677  		spin_unlock_irqrestore(&nc->lock, flags);
678  		return -1;
679  	}
680  
681  	ncf->vids[index] = vid;
682  	set_bit(index, bitmap);
683  	spin_unlock_irqrestore(&nc->lock, flags);
684  
685  	nca->type = NCSI_PKT_CMD_SVF;
686  	nca->words[1] = vid;
687  	/* HW filter index starts at 1 */
688  	nca->bytes[6] = index + 1;
689  	nca->bytes[7] = 0x01;
690  
691  	return 0;
692  }
693  
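/* Build and send the Intel OEM "keep PHY link up" command: the payload
 * carries the Intel manufacturer ID, the sub-command code and a
 * PHY-link-up attribute byte.
 */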
694  static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
695  {
696  	unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
697  	int ret = 0;
698  
699  	nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN;
700  
701  	memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN);
702  	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
703  
704  	data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;
705  
706  	/* PHY Link up attribute */
707  	data[6] = 0x1;
708  
709  	nca->data = data;
710  
711  	ret = ncsi_xmit_cmd(nca);
712  	if (ret)
713  		netdev_err(nca->ndp->ndev.dev,
714  			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
715  			   nca->type);
716  	return ret;
717  }
718  
719  /* NCSI OEM Command APIs */
720  static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
721  {
722  	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
723  	int ret = 0;
724  
725  	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
726  
727  	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
728  	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID);
729  	data[5] = NCSI_OEM_BCM_CMD_GMA;
730  
731  	nca->data = data;
732  
733  	ret = ncsi_xmit_cmd(nca);
734  	if (ret)
735  		netdev_err(nca->ndp->ndev.dev,
736  			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
737  			   nca->type);
738  	return ret;
739  }
740  
741  static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
742  {
743  	union {
744  		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
745  		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
746  	} u;
747  	int ret = 0;
748  
749  	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
750  
751  	memset(&u, 0, sizeof(u));
752  	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
753  	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
754  	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
755  
756  	nca->data = u.data_u8;
757  
758  	ret = ncsi_xmit_cmd(nca);
759  	if (ret)
760  		netdev_err(nca->ndp->ndev.dev,
761  			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
762  			   nca->type);
763  	return ret;
764  }
765  
766  static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
767  {
768  	union {
769  		u8 data_u8[NCSI_OEM_MLX_CMD_SMAF_LEN];
770  		u32 data_u32[NCSI_OEM_MLX_CMD_SMAF_LEN / sizeof(u32)];
771  	} u;
772  	int ret = 0;
773  
774  	memset(&u, 0, sizeof(u));
775  	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
776  	u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
777  	u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
778  	memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
779  	       nca->ndp->ndev.dev->dev_addr,	ETH_ALEN);
780  	u.data_u8[MLX_SMAF_MED_SUPPORT_OFFSET] =
781  		(MLX_MC_RBT_AVL | MLX_MC_RBT_SUPPORT);
782  
783  	nca->payload = NCSI_OEM_MLX_CMD_SMAF_LEN;
784  	nca->data = u.data_u8;
785  
786  	ret = ncsi_xmit_cmd(nca);
787  	if (ret)
788  		netdev_err(nca->ndp->ndev.dev,
789  			   "NCSI: Failed to transmit cmd 0x%x during probe\n",
790  			   nca->type);
791  	return ret;
792  }
793  
794  static int ncsi_oem_gma_handler_intel(struct ncsi_cmd_arg *nca)
795  {
796  	unsigned char data[NCSI_OEM_INTEL_CMD_GMA_LEN];
797  	int ret = 0;
798  
799  	nca->payload = NCSI_OEM_INTEL_CMD_GMA_LEN;
800  
801  	memset(data, 0, NCSI_OEM_INTEL_CMD_GMA_LEN);
802  	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
803  	data[4] = NCSI_OEM_INTEL_CMD_GMA;
804  
805  	nca->data = data;
806  
807  	ret = ncsi_xmit_cmd(nca);
808  	if (ret)
809  		netdev_err(nca->ndp->ndev.dev,
810  			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
811  			   nca->type);
812  
813  	return ret;
814  }
815  
816  /* OEM Command handlers initialization */
817  static struct ncsi_oem_gma_handler {
818  	unsigned int	mfr_id;
819  	int		(*handler)(struct ncsi_cmd_arg *nca);
820  } ncsi_oem_gma_handlers[] = {
821  	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
822  	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx },
823  	{ NCSI_OEM_MFR_INTEL_ID, ncsi_oem_gma_handler_intel }
824  };
825  
826  static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
827  {
828  	struct ncsi_oem_gma_handler *nch = NULL;
829  	int i;
830  
831  	/* This function should only be called once, return if flag set */
832  	if (nca->ndp->gma_flag == 1)
833  		return -1;
834  
835  	/* Find gma handler for given manufacturer id */
836  	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
837  		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
838  			if (ncsi_oem_gma_handlers[i].handler)
839  				nch = &ncsi_oem_gma_handlers[i];
840  			break;
841  			}
842  	}
843  
844  	if (!nch) {
845  		netdev_err(nca->ndp->ndev.dev,
846  			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
847  			   mf_id);
848  		return -1;
849  	}
850  
851  	/* Get Mac address from NCSI device */
852  	return nch->handler(nca);
853  }
854  
855  /* Determine if a given channel from the channel_queue should be used for Tx */
856  static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
857  			       struct ncsi_channel *nc)
858  {
859  	struct ncsi_channel_mode *ncm;
860  	struct ncsi_channel *channel;
861  	struct ncsi_package *np;
862  
863  	/* Check if any other channel has Tx enabled; a channel may have already
864  	 * been configured and removed from the channel queue.
865  	 */
866  	NCSI_FOR_EACH_PACKAGE(ndp, np) {
867  		if (!ndp->multi_package && np != nc->package)
868  			continue;
869  		NCSI_FOR_EACH_CHANNEL(np, channel) {
870  			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
871  			if (ncm->enable)
872  				return false;
873  		}
874  	}
875  
876  	/* This channel is the preferred channel and has link */
877  	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
878  		np = channel->package;
879  		if (np->preferred_channel &&
880  		    ncsi_channel_has_link(np->preferred_channel)) {
881  			return np->preferred_channel == nc;
882  		}
883  	}
884  
885  	/* This channel has link */
886  	if (ncsi_channel_has_link(nc))
887  		return true;
888  
889  	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
890  		if (ncsi_channel_has_link(channel))
891  			return false;
892  
893  	/* No other channel has link; default to this one */
894  	return true;
895  }
896  
897  /* Change the active Tx channel in a multi-channel setup */
898  int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
899  			   struct ncsi_package *package,
900  			   struct ncsi_channel *disable,
901  			   struct ncsi_channel *enable)
902  {
903  	struct ncsi_cmd_arg nca;
904  	struct ncsi_channel *nc;
905  	struct ncsi_package *np;
906  	int ret = 0;
907  
908  	if (!package->multi_channel && !ndp->multi_package)
909  		netdev_warn(ndp->ndev.dev,
910  			    "NCSI: Trying to update Tx channel in single-channel mode\n");
911  	nca.ndp = ndp;
912  	nca.req_flags = 0;
913  
914  	/* Find current channel with Tx enabled */
915  	NCSI_FOR_EACH_PACKAGE(ndp, np) {
916  		if (disable)
917  			break;
918  		if (!ndp->multi_package && np != package)
919  			continue;
920  
921  		NCSI_FOR_EACH_CHANNEL(np, nc)
922  			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
923  				disable = nc;
924  				break;
925  			}
926  	}
927  
928  	/* Find a suitable channel for Tx */
929  	NCSI_FOR_EACH_PACKAGE(ndp, np) {
930  		if (enable)
931  			break;
932  		if (!ndp->multi_package && np != package)
933  			continue;
934  		if (!(ndp->package_whitelist & (0x1 << np->id)))
935  			continue;
936  
937  		if (np->preferred_channel &&
938  		    ncsi_channel_has_link(np->preferred_channel)) {
939  			enable = np->preferred_channel;
940  			break;
941  		}
942  
943  		NCSI_FOR_EACH_CHANNEL(np, nc) {
944  			if (!(np->channel_whitelist & 0x1 << nc->id))
945  				continue;
946  			if (nc->state != NCSI_CHANNEL_ACTIVE)
947  				continue;
948  			if (ncsi_channel_has_link(nc)) {
949  				enable = nc;
950  				break;
951  			}
952  		}
953  	}
954  
955  	if (disable == enable)
956  		return -1;
957  
958  	if (!enable)
959  		return -1;
960  
961  	if (disable) {
962  		nca.channel = disable->id;
963  		nca.package = disable->package->id;
964  		nca.type = NCSI_PKT_CMD_DCNT;
965  		ret = ncsi_xmit_cmd(&nca);
966  		if (ret)
967  			netdev_err(ndp->ndev.dev,
968  				   "Error %d sending DCNT\n",
969  				   ret);
970  	}
971  
972  	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
973  
974  	nca.channel = enable->id;
975  	nca.package = enable->package->id;
976  	nca.type = NCSI_PKT_CMD_ECNT;
977  	ret = ncsi_xmit_cmd(&nca);
978  	if (ret)
979  		netdev_err(ndp->ndev.dev,
980  			   "Error %d sending ECNT\n",
981  			   ret);
982  
983  	return ret;
984  }
985  
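/* Configuration state machine for the channel at the head of the queue:
 * select the package (SP), clear initial state (CIS), optionally fetch
 * the MAC address via GMCMA or an OEM GMA command and apply it, program
 * the VLAN and MAC filters, enable broadcast filtering, optionally
 * disable global multicast filtering, enable channel network Tx when
 * this channel is chosen for Tx, enable the channel and AENs, and finish
 * with a Get Link Status before marking the channel ACTIVE.
 */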
986  static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
987  {
988  	struct ncsi_package *np = ndp->active_package;
989  	struct ncsi_channel *nc = ndp->active_channel;
990  	struct ncsi_channel *hot_nc = NULL;
991  	struct ncsi_dev *nd = &ndp->ndev;
992  	struct net_device *dev = nd->dev;
993  	struct ncsi_cmd_arg nca;
994  	unsigned char index;
995  	unsigned long flags;
996  	int ret;
997  
998  	nca.ndp = ndp;
999  	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1000  	switch (nd->state) {
1001  	case ncsi_dev_state_config:
1002  	case ncsi_dev_state_config_sp:
1003  		ndp->pending_req_num = 1;
1004  
1005  		/* Select the specific package */
1006  		nca.type = NCSI_PKT_CMD_SP;
1007  		if (ndp->flags & NCSI_DEV_HWA)
1008  			nca.bytes[0] = 0;
1009  		else
1010  			nca.bytes[0] = 1;
1011  		nca.package = np->id;
1012  		nca.channel = NCSI_RESERVED_CHANNEL;
1013  		ret = ncsi_xmit_cmd(&nca);
1014  		if (ret) {
1015  			netdev_err(ndp->ndev.dev,
1016  				   "NCSI: Failed to transmit CMD_SP\n");
1017  			goto error;
1018  		}
1019  
1020  		nd->state = ncsi_dev_state_config_cis;
1021  		break;
1022  	case ncsi_dev_state_config_cis:
1023  		ndp->pending_req_num = 1;
1024  
1025  		/* Clear initial state */
1026  		nca.type = NCSI_PKT_CMD_CIS;
1027  		nca.package = np->id;
1028  		nca.channel = nc->id;
1029  		ret = ncsi_xmit_cmd(&nca);
1030  		if (ret) {
1031  			netdev_err(ndp->ndev.dev,
1032  				   "NCSI: Failed to transmit CMD_CIS\n");
1033  			goto error;
1034  		}
1035  
1036  		nd->state = IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
1037  			  ? ncsi_dev_state_config_oem_gma
1038  			  : ncsi_dev_state_config_clear_vids;
1039  		break;
1040  	case ncsi_dev_state_config_oem_gma:
1041  		nd->state = ncsi_dev_state_config_apply_mac;
1042  
1043  		nca.package = np->id;
1044  		nca.channel = nc->id;
1045  		ndp->pending_req_num = 1;
1046  		if (nc->version.major >= 1 && nc->version.minor >= 2) {
1047  			nca.type = NCSI_PKT_CMD_GMCMA;
1048  			ret = ncsi_xmit_cmd(&nca);
1049  		} else {
1050  			nca.type = NCSI_PKT_CMD_OEM;
1051  			ret = ncsi_gma_handler(&nca, nc->version.mf_id);
1052  		}
1053  		if (ret < 0) {
1054  			nd->state = ncsi_dev_state_config_clear_vids;
1055  			schedule_work(&ndp->work);
1056  		}
1057  
1058  		break;
1059  	case ncsi_dev_state_config_apply_mac:
1060  		rtnl_lock();
1061  		ret = dev_set_mac_address(dev, &ndp->pending_mac, NULL);
1062  		rtnl_unlock();
1063  		if (ret < 0)
1064  			netdev_warn(dev, "NCSI: Writing MAC address to device failed\n");
1065  
1066  		nd->state = ncsi_dev_state_config_clear_vids;
1067  
1068  		fallthrough;
1069  	case ncsi_dev_state_config_clear_vids:
1070  	case ncsi_dev_state_config_svf:
1071  	case ncsi_dev_state_config_ev:
1072  	case ncsi_dev_state_config_sma:
1073  	case ncsi_dev_state_config_ebf:
1074  	case ncsi_dev_state_config_dgmf:
1075  	case ncsi_dev_state_config_ecnt:
1076  	case ncsi_dev_state_config_ec:
1077  	case ncsi_dev_state_config_ae:
1078  	case ncsi_dev_state_config_gls:
1079  		ndp->pending_req_num = 1;
1080  
1081  		nca.package = np->id;
1082  		nca.channel = nc->id;
1083  
1084  		/* Clear any active filters on the channel before setting */
1085  		if (nd->state == ncsi_dev_state_config_clear_vids) {
1086  			ret = clear_one_vid(ndp, nc, &nca);
1087  			if (ret) {
1088  				nd->state = ncsi_dev_state_config_svf;
1089  				schedule_work(&ndp->work);
1090  				break;
1091  			}
1092  			/* Repeat */
1093  			nd->state = ncsi_dev_state_config_clear_vids;
1094  		/* Add known VLAN tags to the filter */
1095  		} else if (nd->state == ncsi_dev_state_config_svf) {
1096  			ret = set_one_vid(ndp, nc, &nca);
1097  			if (ret) {
1098  				nd->state = ncsi_dev_state_config_ev;
1099  				schedule_work(&ndp->work);
1100  				break;
1101  			}
1102  			/* Repeat */
1103  			nd->state = ncsi_dev_state_config_svf;
1104  		/* Enable/Disable the VLAN filter */
1105  		} else if (nd->state == ncsi_dev_state_config_ev) {
1106  			if (list_empty(&ndp->vlan_vids)) {
1107  				nca.type = NCSI_PKT_CMD_DV;
1108  			} else {
1109  				nca.type = NCSI_PKT_CMD_EV;
1110  				nca.bytes[3] = NCSI_CAP_VLAN_NO;
1111  			}
1112  			nd->state = ncsi_dev_state_config_sma;
1113  		} else if (nd->state == ncsi_dev_state_config_sma) {
1114  			/* Use the first entry in the unicast MAC filter
1115  			 * table. Note that the filter table starts from
1116  			 * entry 1 instead of 0.
1117  			 */
1118  			nca.type = NCSI_PKT_CMD_SMA;
1119  			for (index = 0; index < 6; index++)
1120  				nca.bytes[index] = dev->dev_addr[index];
1121  			nca.bytes[6] = 0x1;
1122  			nca.bytes[7] = 0x1;
1123  			nd->state = ncsi_dev_state_config_ebf;
1124  		} else if (nd->state == ncsi_dev_state_config_ebf) {
1125  			nca.type = NCSI_PKT_CMD_EBF;
1126  			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
1127  			/* If global multicast filtering is supported then
1128  			 * disable it so that all multicast packets will be
1129  			 * forwarded to the management controller.
1130  			 */
1131  			if (nc->caps[NCSI_CAP_GENERIC].cap &
1132  			    NCSI_CAP_GENERIC_MC)
1133  				nd->state = ncsi_dev_state_config_dgmf;
1134  			else if (ncsi_channel_is_tx(ndp, nc))
1135  				nd->state = ncsi_dev_state_config_ecnt;
1136  			else
1137  				nd->state = ncsi_dev_state_config_ec;
1138  		} else if (nd->state == ncsi_dev_state_config_dgmf) {
1139  			nca.type = NCSI_PKT_CMD_DGMF;
1140  			if (ncsi_channel_is_tx(ndp, nc))
1141  				nd->state = ncsi_dev_state_config_ecnt;
1142  			else
1143  				nd->state = ncsi_dev_state_config_ec;
1144  		} else if (nd->state == ncsi_dev_state_config_ecnt) {
1145  			if (np->preferred_channel &&
1146  			    nc != np->preferred_channel)
1147  				netdev_info(ndp->ndev.dev,
1148  					    "NCSI: Tx failed over to channel %u\n",
1149  					    nc->id);
1150  			nca.type = NCSI_PKT_CMD_ECNT;
1151  			nd->state = ncsi_dev_state_config_ec;
1152  		} else if (nd->state == ncsi_dev_state_config_ec) {
1153  			/* Enable AEN if it's supported */
1154  			nca.type = NCSI_PKT_CMD_EC;
1155  			nd->state = ncsi_dev_state_config_ae;
1156  			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
1157  				nd->state = ncsi_dev_state_config_gls;
1158  		} else if (nd->state == ncsi_dev_state_config_ae) {
1159  			nca.type = NCSI_PKT_CMD_AE;
1160  			nca.bytes[0] = 0;
1161  			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
1162  			nd->state = ncsi_dev_state_config_gls;
1163  		} else if (nd->state == ncsi_dev_state_config_gls) {
1164  			nca.type = NCSI_PKT_CMD_GLS;
1165  			nd->state = ncsi_dev_state_config_done;
1166  		}
1167  
1168  		ret = ncsi_xmit_cmd(&nca);
1169  		if (ret) {
1170  			netdev_err(ndp->ndev.dev,
1171  				   "NCSI: Failed to transmit CMD %x\n",
1172  				   nca.type);
1173  			goto error;
1174  		}
1175  		break;
1176  	case ncsi_dev_state_config_done:
1177  		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
1178  			   nc->id);
1179  		spin_lock_irqsave(&nc->lock, flags);
1180  		nc->state = NCSI_CHANNEL_ACTIVE;
1181  
1182  		if (ndp->flags & NCSI_DEV_RESET) {
1183  			/* A reset event happened during config, start it now */
1184  			nc->reconfigure_needed = false;
1185  			spin_unlock_irqrestore(&nc->lock, flags);
1186  			ncsi_reset_dev(nd);
1187  			break;
1188  		}
1189  
1190  		if (nc->reconfigure_needed) {
1191  			/* This channel's configuration has been updated
1192  			 * part-way during the config state - start the
1193  			 * channel configuration over
1194  			 */
1195  			nc->reconfigure_needed = false;
1196  			nc->state = NCSI_CHANNEL_INACTIVE;
1197  			spin_unlock_irqrestore(&nc->lock, flags);
1198  
1199  			spin_lock_irqsave(&ndp->lock, flags);
1200  			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1201  			spin_unlock_irqrestore(&ndp->lock, flags);
1202  
1203  			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
1204  			ncsi_process_next_channel(ndp);
1205  			break;
1206  		}
1207  
1208  		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
1209  			hot_nc = nc;
1210  		} else {
1211  			hot_nc = NULL;
1212  			netdev_dbg(ndp->ndev.dev,
1213  				   "NCSI: channel %u link down after config\n",
1214  				   nc->id);
1215  		}
1216  		spin_unlock_irqrestore(&nc->lock, flags);
1217  
1218  		/* Update the hot channel */
1219  		spin_lock_irqsave(&ndp->lock, flags);
1220  		ndp->hot_channel = hot_nc;
1221  		spin_unlock_irqrestore(&ndp->lock, flags);
1222  
1223  		ncsi_start_channel_monitor(nc);
1224  		ncsi_process_next_channel(ndp);
1225  		break;
1226  	default:
1227  		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
1228  			     nd->state);
1229  	}
1230  
1231  	return;
1232  
1233  error:
1234  	ncsi_report_link(ndp, true);
1235  }
1236  
1237  static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
1238  {
1239  	struct ncsi_channel *nc, *found, *hot_nc;
1240  	struct ncsi_channel_mode *ncm;
1241  	unsigned long flags, cflags;
1242  	struct ncsi_package *np;
1243  	bool with_link;
1244  
1245  	spin_lock_irqsave(&ndp->lock, flags);
1246  	hot_nc = ndp->hot_channel;
1247  	spin_unlock_irqrestore(&ndp->lock, flags);
1248  
1249  	/* By default the search stops once an inactive channel with link
1250  	 * up is found, unless a preferred channel is set.
1251  	 * If multi_package or multi_channel are configured, all channels in
1252  	 * the whitelist are added to the channel queue.
1253  	 */
1254  	found = NULL;
1255  	with_link = false;
1256  	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1257  		if (!(ndp->package_whitelist & (0x1 << np->id)))
1258  			continue;
1259  		NCSI_FOR_EACH_CHANNEL(np, nc) {
1260  			if (!(np->channel_whitelist & (0x1 << nc->id)))
1261  				continue;
1262  
1263  			spin_lock_irqsave(&nc->lock, cflags);
1264  
1265  			if (!list_empty(&nc->link) ||
1266  			    nc->state != NCSI_CHANNEL_INACTIVE) {
1267  				spin_unlock_irqrestore(&nc->lock, cflags);
1268  				continue;
1269  			}
1270  
1271  			if (!found)
1272  				found = nc;
1273  
1274  			if (nc == hot_nc)
1275  				found = nc;
1276  
1277  			ncm = &nc->modes[NCSI_MODE_LINK];
1278  			if (ncm->data[2] & 0x1) {
1279  				found = nc;
1280  				with_link = true;
1281  			}
1282  
1283  			/* If multi_channel is enabled, configure all valid
1284  			 * channels whether or not they currently have link,
1285  			 * so they will have AENs enabled.
1286  			 */
1287  			if (with_link || np->multi_channel) {
1288  				spin_lock_irqsave(&ndp->lock, flags);
1289  				list_add_tail_rcu(&nc->link,
1290  						  &ndp->channel_queue);
1291  				spin_unlock_irqrestore(&ndp->lock, flags);
1292  
1293  				netdev_dbg(ndp->ndev.dev,
1294  					   "NCSI: Channel %u added to queue (link %s)\n",
1295  					   nc->id,
1296  					   ncm->data[2] & 0x1 ? "up" : "down");
1297  			}
1298  
1299  			spin_unlock_irqrestore(&nc->lock, cflags);
1300  
1301  			if (with_link && !np->multi_channel)
1302  				break;
1303  		}
1304  		if (with_link && !ndp->multi_package)
1305  			break;
1306  	}
1307  
1308  	if (list_empty(&ndp->channel_queue) && found) {
1309  		netdev_info(ndp->ndev.dev,
1310  			    "NCSI: No channel with link found, configuring channel %u\n",
1311  			    found->id);
1312  		spin_lock_irqsave(&ndp->lock, flags);
1313  		list_add_tail_rcu(&found->link, &ndp->channel_queue);
1314  		spin_unlock_irqrestore(&ndp->lock, flags);
1315  	} else if (!found) {
1316  		netdev_warn(ndp->ndev.dev,
1317  			    "NCSI: No channel found to configure!\n");
1318  		ncsi_report_link(ndp, true);
1319  		return -ENODEV;
1320  	}
1321  
1322  	return ncsi_process_next_channel(ndp);
1323  }
1324  
1325  static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1326  {
1327  	struct ncsi_package *np;
1328  	struct ncsi_channel *nc;
1329  	unsigned int cap;
1330  	bool has_channel = false;
1331  
1332  	/* Hardware arbitration is disabled if any single channel
1333  	 * doesn't explicitly advertise support for it.
1334  	 */
1335  	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1336  		NCSI_FOR_EACH_CHANNEL(np, nc) {
1337  			has_channel = true;
1338  
1339  			cap = nc->caps[NCSI_CAP_GENERIC].cap;
1340  			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1341  			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1342  			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
1343  				ndp->flags &= ~NCSI_DEV_HWA;
1344  				return false;
1345  			}
1346  		}
1347  	}
1348  
1349  	if (has_channel) {
1350  		ndp->flags |= NCSI_DEV_HWA;
1351  		return true;
1352  	}
1353  
1354  	ndp->flags &= ~NCSI_DEV_HWA;
1355  	return false;
1356  }
1357  
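/* Probe state machine: deselect all eight possible packages, then for
 * each package select it, walk its channels issuing CIS/GVI/GC/GLS (plus
 * optional Mellanox/Intel OEM commands), deselect it and move on. Once
 * every package has been probed, NCSI_DEV_PROBED is set and an active
 * channel is chosen.
 */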
1358  static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1359  {
1360  	struct ncsi_dev *nd = &ndp->ndev;
1361  	struct ncsi_package *np;
1362  	struct ncsi_cmd_arg nca;
1363  	unsigned char index;
1364  	int ret;
1365  
1366  	nca.ndp = ndp;
1367  	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1368  	switch (nd->state) {
1369  	case ncsi_dev_state_probe:
1370  		nd->state = ncsi_dev_state_probe_deselect;
1371  		fallthrough;
1372  	case ncsi_dev_state_probe_deselect:
1373  		ndp->pending_req_num = 8;
1374  
1375  		/* Deselect all possible packages */
1376  		nca.type = NCSI_PKT_CMD_DP;
1377  		nca.channel = NCSI_RESERVED_CHANNEL;
1378  		for (index = 0; index < 8; index++) {
1379  			nca.package = index;
1380  			ret = ncsi_xmit_cmd(&nca);
1381  			if (ret)
1382  				goto error;
1383  		}
1384  
1385  		nd->state = ncsi_dev_state_probe_package;
1386  		break;
1387  	case ncsi_dev_state_probe_package:
1388  		if (ndp->package_probe_id >= 8) {
1389  			/* Last package probed, finishing */
1390  			ndp->flags |= NCSI_DEV_PROBED;
1391  			break;
1392  		}
1393  
1394  		ndp->pending_req_num = 1;
1395  
1396  		nca.type = NCSI_PKT_CMD_SP;
1397  		nca.bytes[0] = 1;
1398  		nca.package = ndp->package_probe_id;
1399  		nca.channel = NCSI_RESERVED_CHANNEL;
1400  		ret = ncsi_xmit_cmd(&nca);
1401  		if (ret)
1402  			goto error;
1403  		nd->state = ncsi_dev_state_probe_channel;
1404  		break;
1405  	case ncsi_dev_state_probe_channel:
1406  		ndp->active_package = ncsi_find_package(ndp,
1407  							ndp->package_probe_id);
1408  		if (!ndp->active_package) {
1409  			/* No response */
1410  			nd->state = ncsi_dev_state_probe_dp;
1411  			schedule_work(&ndp->work);
1412  			break;
1413  		}
1414  		nd->state = ncsi_dev_state_probe_cis;
1415  		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) &&
1416  		    ndp->mlx_multi_host)
1417  			nd->state = ncsi_dev_state_probe_mlx_gma;
1418  
1419  		schedule_work(&ndp->work);
1420  		break;
1421  	case ncsi_dev_state_probe_mlx_gma:
1422  		ndp->pending_req_num = 1;
1423  
1424  		nca.type = NCSI_PKT_CMD_OEM;
1425  		nca.package = ndp->active_package->id;
1426  		nca.channel = 0;
1427  		ret = ncsi_oem_gma_handler_mlx(&nca);
1428  		if (ret)
1429  			goto error;
1430  
1431  		nd->state = ncsi_dev_state_probe_mlx_smaf;
1432  		break;
1433  	case ncsi_dev_state_probe_mlx_smaf:
1434  		ndp->pending_req_num = 1;
1435  
1436  		nca.type = NCSI_PKT_CMD_OEM;
1437  		nca.package = ndp->active_package->id;
1438  		nca.channel = 0;
1439  		ret = ncsi_oem_smaf_mlx(&nca);
1440  		if (ret)
1441  			goto error;
1442  
1443  		nd->state = ncsi_dev_state_probe_cis;
1444  		break;
1445  	case ncsi_dev_state_probe_keep_phy:
1446  		ndp->pending_req_num = 1;
1447  
1448  		nca.type = NCSI_PKT_CMD_OEM;
1449  		nca.package = ndp->active_package->id;
1450  		nca.channel = 0;
1451  		ret = ncsi_oem_keep_phy_intel(&nca);
1452  		if (ret)
1453  			goto error;
1454  
1455  		nd->state = ncsi_dev_state_probe_gvi;
1456  		break;
1457  	case ncsi_dev_state_probe_cis:
1458  	case ncsi_dev_state_probe_gvi:
1459  	case ncsi_dev_state_probe_gc:
1460  	case ncsi_dev_state_probe_gls:
1461  		np = ndp->active_package;
1462  		ndp->pending_req_num = 1;
1463  
1464  		/* Clear initial state, or retrieve version, capabilities or link status */
1465  		if (nd->state == ncsi_dev_state_probe_cis)
1466  			nca.type = NCSI_PKT_CMD_CIS;
1467  		else if (nd->state == ncsi_dev_state_probe_gvi)
1468  			nca.type = NCSI_PKT_CMD_GVI;
1469  		else if (nd->state == ncsi_dev_state_probe_gc)
1470  			nca.type = NCSI_PKT_CMD_GC;
1471  		else
1472  			nca.type = NCSI_PKT_CMD_GLS;
1473  
1474  		nca.package = np->id;
1475  		nca.channel = ndp->channel_probe_id;
1476  
1477  		ret = ncsi_xmit_cmd(&nca);
1478  		if (ret)
1479  			goto error;
1480  
1481  		if (nd->state == ncsi_dev_state_probe_cis) {
1482  			nd->state = ncsi_dev_state_probe_gvi;
1483  			if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
1484  				nd->state = ncsi_dev_state_probe_keep_phy;
1485  		} else if (nd->state == ncsi_dev_state_probe_gvi) {
1486  			nd->state = ncsi_dev_state_probe_gc;
1487  		} else if (nd->state == ncsi_dev_state_probe_gc) {
1488  			nd->state = ncsi_dev_state_probe_gls;
1489  		} else {
1490  			nd->state = ncsi_dev_state_probe_cis;
1491  			ndp->channel_probe_id++;
1492  		}
1493  
1494  		if (ndp->channel_probe_id == ndp->channel_count) {
1495  			ndp->channel_probe_id = 0;
1496  			nd->state = ncsi_dev_state_probe_dp;
1497  		}
1498  		break;
1499  	case ncsi_dev_state_probe_dp:
1500  		ndp->pending_req_num = 1;
1501  
1502  		/* Deselect the current package */
1503  		nca.type = NCSI_PKT_CMD_DP;
1504  		nca.package = ndp->package_probe_id;
1505  		nca.channel = NCSI_RESERVED_CHANNEL;
1506  		ret = ncsi_xmit_cmd(&nca);
1507  		if (ret)
1508  			goto error;
1509  
1510  		/* Probe next package after receiving response */
1511  		ndp->package_probe_id++;
1512  		nd->state = ncsi_dev_state_probe_package;
1513  		ndp->active_package = NULL;
1514  		break;
1515  	default:
1516  		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in enumeration\n",
1517  			    nd->state);
1518  	}
1519  
1520  	if (ndp->flags & NCSI_DEV_PROBED) {
1521  		/* Check if all packages have HWA support */
1522  		ncsi_check_hwa(ndp);
1523  		ncsi_choose_active_channel(ndp);
1524  	}
1525  
1526  	return;
1527  error:
1528  	netdev_err(ndp->ndev.dev,
1529  		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
1530  		   nca.type);
1531  	ncsi_report_link(ndp, true);
1532  }
1533  
1534  static void ncsi_dev_work(struct work_struct *work)
1535  {
1536  	struct ncsi_dev_priv *ndp = container_of(work,
1537  			struct ncsi_dev_priv, work);
1538  	struct ncsi_dev *nd = &ndp->ndev;
1539  
1540  	switch (nd->state & ncsi_dev_state_major) {
1541  	case ncsi_dev_state_probe:
1542  		ncsi_probe_channel(ndp);
1543  		break;
1544  	case ncsi_dev_state_suspend:
1545  		ncsi_suspend_channel(ndp);
1546  		break;
1547  	case ncsi_dev_state_config:
1548  		ncsi_configure_channel(ndp);
1549  		break;
1550  	default:
1551  		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
1552  			    nd->state);
1553  	}
1554  }
1555  
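/* Pop the next channel off the channel queue and drive it through the
 * appropriate state machine: configure it if it was INACTIVE, suspend it
 * if it was ACTIVE. With an empty queue, either reshuffle the channels
 * (NCSI_DEV_RESHUFFLE) or recompute and report the link state.
 */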
1556  int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1557  {
1558  	struct ncsi_channel *nc;
1559  	int old_state;
1560  	unsigned long flags;
1561  
1562  	spin_lock_irqsave(&ndp->lock, flags);
1563  	nc = list_first_or_null_rcu(&ndp->channel_queue,
1564  				    struct ncsi_channel, link);
1565  	if (!nc) {
1566  		spin_unlock_irqrestore(&ndp->lock, flags);
1567  		goto out;
1568  	}
1569  
1570  	list_del_init(&nc->link);
1571  	spin_unlock_irqrestore(&ndp->lock, flags);
1572  
1573  	spin_lock_irqsave(&nc->lock, flags);
1574  	old_state = nc->state;
1575  	nc->state = NCSI_CHANNEL_INVISIBLE;
1576  	spin_unlock_irqrestore(&nc->lock, flags);
1577  
1578  	ndp->active_channel = nc;
1579  	ndp->active_package = nc->package;
1580  
1581  	switch (old_state) {
1582  	case NCSI_CHANNEL_INACTIVE:
1583  		ndp->ndev.state = ncsi_dev_state_config;
1584  		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
1585  			   nc->id);
1586  		ncsi_configure_channel(ndp);
1587  		break;
1588  	case NCSI_CHANNEL_ACTIVE:
1589  		ndp->ndev.state = ncsi_dev_state_suspend;
1590  		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1591  			   nc->id);
1592  		ncsi_suspend_channel(ndp);
1593  		break;
1594  	default:
1595  		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1596  			   old_state, nc->package->id, nc->id);
1597  		ncsi_report_link(ndp, false);
1598  		return -EINVAL;
1599  	}
1600  
1601  	return 0;
1602  
1603  out:
1604  	ndp->active_channel = NULL;
1605  	ndp->active_package = NULL;
1606  	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1607  		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1608  		return ncsi_choose_active_channel(ndp);
1609  	}
1610  
1611  	ncsi_report_link(ndp, false);
1612  	return -ENODEV;
1613  }
1614  
1615  static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1616  {
1617  	struct ncsi_dev *nd = &ndp->ndev;
1618  	struct ncsi_channel *nc;
1619  	struct ncsi_package *np;
1620  	unsigned long flags;
1621  	unsigned int n = 0;
1622  
1623  	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1624  		NCSI_FOR_EACH_CHANNEL(np, nc) {
1625  			spin_lock_irqsave(&nc->lock, flags);
1626  
1627  			/* Channels may be busy, so mark them dirty instead
1628  			 * of kicking them if:
1629  			 * a) not ACTIVE (configured)
1630  			 * b) in the channel_queue (to be configured)
1631  			 * c) its ndev is in the config state
1632  			 */
1633  			if (nc->state != NCSI_CHANNEL_ACTIVE) {
1634  				if ((ndp->ndev.state & 0xff00) ==
1635  						ncsi_dev_state_config ||
1636  						!list_empty(&nc->link)) {
1637  					netdev_dbg(nd->dev,
1638  						   "NCSI: channel %p marked dirty\n",
1639  						   nc);
1640  					nc->reconfigure_needed = true;
1641  				}
1642  				spin_unlock_irqrestore(&nc->lock, flags);
1643  				continue;
1644  			}
1645  
1646  			spin_unlock_irqrestore(&nc->lock, flags);
1647  
1648  			ncsi_stop_channel_monitor(nc);
1649  			spin_lock_irqsave(&nc->lock, flags);
1650  			nc->state = NCSI_CHANNEL_INACTIVE;
1651  			spin_unlock_irqrestore(&nc->lock, flags);
1652  
1653  			spin_lock_irqsave(&ndp->lock, flags);
1654  			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1655  			spin_unlock_irqrestore(&ndp->lock, flags);
1656  
1657  			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
1658  			n++;
1659  		}
1660  	}
1661  
1662  	return n;
1663  }
1664  
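/* Record @vid in the per-device VLAN list and kick the configured
 * channels so the updated filter set gets reprogrammed. A VID of 0 and
 * duplicates are ignored; at most NCSI_MAX_VLAN_VIDS entries are tracked.
 */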
1665  int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1666  {
1667  	struct ncsi_dev_priv *ndp;
1668  	unsigned int n_vids = 0;
1669  	struct vlan_vid *vlan;
1670  	struct ncsi_dev *nd;
1671  	bool found = false;
1672  
1673  	if (vid == 0)
1674  		return 0;
1675  
1676  	nd = ncsi_find_dev(dev);
1677  	if (!nd) {
1678  		netdev_warn(dev, "NCSI: No net_device?\n");
1679  		return 0;
1680  	}
1681  
1682  	ndp = TO_NCSI_DEV_PRIV(nd);
1683  
1684  	/* Add the VLAN id to our internal list */
1685  	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1686  		n_vids++;
1687  		if (vlan->vid == vid) {
1688  			netdev_dbg(dev, "NCSI: vid %u already registered\n",
1689  				   vid);
1690  			return 0;
1691  		}
1692  	}
1693  	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1694  		netdev_warn(dev,
1695  			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
1696  			    vid, NCSI_MAX_VLAN_VIDS);
1697  		return -ENOSPC;
1698  	}
1699  
1700  	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1701  	if (!vlan)
1702  		return -ENOMEM;
1703  
1704  	vlan->proto = proto;
1705  	vlan->vid = vid;
1706  	list_add_rcu(&vlan->list, &ndp->vlan_vids);
1707  
1708  	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
1709  
1710  	found = ncsi_kick_channels(ndp) != 0;
1711  
1712  	return found ? ncsi_process_next_channel(ndp) : 0;
1713  }
1714  EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
1715  
1716  int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1717  {
1718  	struct vlan_vid *vlan, *tmp;
1719  	struct ncsi_dev_priv *ndp;
1720  	struct ncsi_dev *nd;
1721  	bool found = false;
1722  
1723  	if (vid == 0)
1724  		return 0;
1725  
1726  	nd = ncsi_find_dev(dev);
1727  	if (!nd) {
1728  		netdev_warn(dev, "NCSI: no net_device?\n");
1729  		return 0;
1730  	}
1731  
1732  	ndp = TO_NCSI_DEV_PRIV(nd);
1733  
1734  	/* Remove the VLAN id from our internal list */
1735  	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1736  		if (vlan->vid == vid) {
1737  			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
1738  			list_del_rcu(&vlan->list);
1739  			found = true;
1740  			kfree(vlan);
1741  		}
1742  
1743  	if (!found) {
1744  		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
1745  		return -EINVAL;
1746  	}
1747  
1748  	found = ncsi_kick_channels(ndp) != 0;
1749  
1750  	return found ? ncsi_process_next_channel(ndp) : 0;
1751  }
1752  EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
1753  
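/* Allocate and initialise the per-netdev NCSI state, register the NC-SI
 * ethertype receive handler and, based on the parent platform device's
 * device-tree node, detect Mellanox multi-host configurations.
 */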
1754  struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1755  				   void (*handler)(struct ncsi_dev *ndev))
1756  {
1757  	struct ncsi_dev_priv *ndp;
1758  	struct ncsi_dev *nd;
1759  	struct platform_device *pdev;
1760  	struct device_node *np;
1761  	unsigned long flags;
1762  	int i;
1763  
1764  	/* Check if the device has been registered or not */
1765  	nd = ncsi_find_dev(dev);
1766  	if (nd)
1767  		return nd;
1768  
1769  	/* Create NCSI device */
1770  	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1771  	if (!ndp)
1772  		return NULL;
1773  
1774  	nd = &ndp->ndev;
1775  	nd->state = ncsi_dev_state_registered;
1776  	nd->dev = dev;
1777  	nd->handler = handler;
1778  	ndp->pending_req_num = 0;
1779  	INIT_LIST_HEAD(&ndp->channel_queue);
1780  	INIT_LIST_HEAD(&ndp->vlan_vids);
1781  	INIT_WORK(&ndp->work, ncsi_dev_work);
1782  	ndp->package_whitelist = UINT_MAX;
1783  
1784  	/* Initialize private NCSI device */
1785  	spin_lock_init(&ndp->lock);
1786  	INIT_LIST_HEAD(&ndp->packages);
1787  	ndp->request_id = NCSI_REQ_START_IDX;
1788  	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1789  		ndp->requests[i].id = i;
1790  		ndp->requests[i].ndp = ndp;
1791  		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
1792  	}
1793  	ndp->channel_count = NCSI_RESERVED_CHANNEL;
1794  
1795  	spin_lock_irqsave(&ncsi_dev_lock, flags);
1796  	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1797  	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1798  
1799  	/* Register NCSI packet Rx handler */
1800  	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1801  	ndp->ptype.func = ncsi_rcv_rsp;
1802  	ndp->ptype.dev = dev;
1803  	dev_add_pack(&ndp->ptype);
1804  
1805  	pdev = to_platform_device(dev->dev.parent);
1806  	if (pdev) {
1807  		np = pdev->dev.of_node;
1808  		if (np && (of_property_read_bool(np, "mellanox,multi-host") ||
1809  			   of_property_read_bool(np, "mlx,multi-host")))
1810  			ndp->mlx_multi_host = true;
1811  	}
1812  
1813  	return nd;
1814  }
1815  EXPORT_SYMBOL_GPL(ncsi_register_dev);
1816  
1817  int ncsi_start_dev(struct ncsi_dev *nd)
1818  {
1819  	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1820  
1821  	if (nd->state != ncsi_dev_state_registered &&
1822  	    nd->state != ncsi_dev_state_functional)
1823  		return -ENOTTY;
1824  
1825  	if (!(ndp->flags & NCSI_DEV_PROBED)) {
1826  		ndp->package_probe_id = 0;
1827  		ndp->channel_probe_id = 0;
1828  		nd->state = ncsi_dev_state_probe;
1829  		schedule_work(&ndp->work);
1830  		return 0;
1831  	}
1832  
1833  	return ncsi_reset_dev(nd);
1834  }
1835  EXPORT_SYMBOL_GPL(ncsi_start_dev);
1836  
1837  void ncsi_stop_dev(struct ncsi_dev *nd)
1838  {
1839  	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1840  	struct ncsi_package *np;
1841  	struct ncsi_channel *nc;
1842  	bool chained;
1843  	int old_state;
1844  	unsigned long flags;
1845  
1846  	/* Stop the channel monitor on any active channels. Don't reset the
1847  	 * channel state so we know which were active when ncsi_start_dev()
1848  	 * is next called.
1849  	 */
1850  	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1851  		NCSI_FOR_EACH_CHANNEL(np, nc) {
1852  			ncsi_stop_channel_monitor(nc);
1853  
1854  			spin_lock_irqsave(&nc->lock, flags);
1855  			chained = !list_empty(&nc->link);
1856  			old_state = nc->state;
1857  			spin_unlock_irqrestore(&nc->lock, flags);
1858  
1859  			WARN_ON_ONCE(chained ||
1860  				     old_state == NCSI_CHANNEL_INVISIBLE);
1861  		}
1862  	}
1863  
1864  	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
1865  	ncsi_report_link(ndp, true);
1866  }
1867  EXPORT_SYMBOL_GPL(ncsi_stop_dev);
1868  
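/* Reset handling: if a suspend or config sequence is already in flight,
 * just set NCSI_DEV_RESET and let that state machine finish; otherwise
 * suspend the currently active channel (if any) and then re-run channel
 * selection.
 */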
1869  int ncsi_reset_dev(struct ncsi_dev *nd)
1870  {
1871  	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1872  	struct ncsi_channel *nc, *active, *tmp;
1873  	struct ncsi_package *np;
1874  	unsigned long flags;
1875  
1876  	spin_lock_irqsave(&ndp->lock, flags);
1877  
1878  	if (!(ndp->flags & NCSI_DEV_RESET)) {
1879  		/* Haven't been called yet, check states */
1880  		switch (nd->state & ncsi_dev_state_major) {
1881  		case ncsi_dev_state_registered:
1882  		case ncsi_dev_state_probe:
1883  			/* Not even probed yet - do nothing */
1884  			spin_unlock_irqrestore(&ndp->lock, flags);
1885  			return 0;
1886  		case ncsi_dev_state_suspend:
1887  		case ncsi_dev_state_config:
1888  			/* Wait for the channel to finish its suspend/config
1889  			 * operation; once it finishes it will check for
1890  			 * NCSI_DEV_RESET and reset the state.
1891  			 */
1892  			ndp->flags |= NCSI_DEV_RESET;
1893  			spin_unlock_irqrestore(&ndp->lock, flags);
1894  			return 0;
1895  		}
1896  	} else {
1897  		switch (nd->state) {
1898  		case ncsi_dev_state_suspend_done:
1899  		case ncsi_dev_state_config_done:
1900  		case ncsi_dev_state_functional:
1901  			/* Ok */
1902  			break;
1903  		default:
1904  			/* Current reset operation happening */
1905  			spin_unlock_irqrestore(&ndp->lock, flags);
1906  			return 0;
1907  		}
1908  	}
1909  
1910  	if (!list_empty(&ndp->channel_queue)) {
1911  		/* Clear any channel queue we may have interrupted */
1912  		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
1913  			list_del_init(&nc->link);
1914  	}
1915  	spin_unlock_irqrestore(&ndp->lock, flags);
1916  
1917  	active = NULL;
1918  	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1919  		NCSI_FOR_EACH_CHANNEL(np, nc) {
1920  			spin_lock_irqsave(&nc->lock, flags);
1921  
1922  			if (nc->state == NCSI_CHANNEL_ACTIVE) {
1923  				active = nc;
1924  				nc->state = NCSI_CHANNEL_INVISIBLE;
1925  				spin_unlock_irqrestore(&nc->lock, flags);
1926  				ncsi_stop_channel_monitor(nc);
1927  				break;
1928  			}
1929  
1930  			spin_unlock_irqrestore(&nc->lock, flags);
1931  		}
1932  		if (active)
1933  			break;
1934  	}
1935  
1936  	if (!active) {
1937  		/* Done */
1938  		spin_lock_irqsave(&ndp->lock, flags);
1939  		ndp->flags &= ~NCSI_DEV_RESET;
1940  		spin_unlock_irqrestore(&ndp->lock, flags);
1941  		return ncsi_choose_active_channel(ndp);
1942  	}
1943  
1944  	spin_lock_irqsave(&ndp->lock, flags);
1945  	ndp->flags |= NCSI_DEV_RESET;
1946  	ndp->active_channel = active;
1947  	ndp->active_package = active->package;
1948  	spin_unlock_irqrestore(&ndp->lock, flags);
1949  
1950  	nd->state = ncsi_dev_state_suspend;
1951  	schedule_work(&ndp->work);
1952  	return 0;
1953  }
1954  
1955  void ncsi_unregister_dev(struct ncsi_dev *nd)
1956  {
1957  	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1958  	struct ncsi_package *np, *tmp;
1959  	unsigned long flags;
1960  
1961  	dev_remove_pack(&ndp->ptype);
1962  
1963  	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1964  		ncsi_remove_package(np);
1965  
1966  	spin_lock_irqsave(&ncsi_dev_lock, flags);
1967  	list_del_rcu(&ndp->node);
1968  	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1969  
1970  	kfree(ndp);
1971  }
1972  EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
1973