// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include "main.h"

/* LAG group config flags. */
#define NFP_FL_LAG_LAST			BIT(1)
#define NFP_FL_LAG_FIRST		BIT(2)
#define NFP_FL_LAG_DATA			BIT(3)
#define NFP_FL_LAG_XON			BIT(4)
#define NFP_FL_LAG_SYNC			BIT(5)
#define NFP_FL_LAG_SWITCH		BIT(6)
#define NFP_FL_LAG_RESET		BIT(7)

/* LAG port state flags. */
#define NFP_PORT_LAG_LINK_UP		BIT(0)
#define NFP_PORT_LAG_TX_ENABLED		BIT(1)
#define NFP_PORT_LAG_CHANGED		BIT(2)

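/* A config batch opens with the FIRST flag set, carries one message per
 * group while in the MEMBER state, and is closed by a message with the
 * SWITCH and LAST flags set (see nfp_fl_lag_config_group() below).
 */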
enum nfp_fl_lag_batch {
	NFP_FL_LAG_BATCH_FIRST,
	NFP_FL_LAG_BATCH_MEMBER,
	NFP_FL_LAG_BATCH_FINISHED
};

/**
 * struct nfp_flower_cmsg_lag_config - control message payload for LAG config
 * @ctrl_flags:	Configuration flags
 * @reserved:	Reserved for future use
 * @ttl:	Time to live of packet - host always sets to 0xff
 * @pkt_number:	Config message packet number - increment for each message
 * @batch_ver:	Batch version of messages - increment for each batch of messages
 * @group_id:	Group ID to which the config applies
 * @group_inst:	Group instance number - increment when group is reused
 * @members:	Array of 32-bit words listing all active group members
 */
struct nfp_flower_cmsg_lag_config {
	u8 ctrl_flags;
	u8 reserved[2];
	u8 ttl;
	__be32 pkt_number;
	__be32 batch_ver;
	__be32 group_id;
	__be32 group_inst;
	__be32 members[];
};

/**
 * struct nfp_fl_lag_group - list entry for each LAG group
 * @group_id:		Assigned group ID for host/kernel sync
 * @group_inst:		Group instance in case of ID reuse
 * @list:		List entry
 * @master_ndev:	Group master netdev
 * @dirty:		Marked if the group needs to be synced to HW
 * @offloaded:		Marked if the group is currently offloaded to NIC
 * @to_remove:		Marked if the group should be removed from NIC
 * @to_destroy:		Marked if the group should be removed from driver
 * @slave_cnt:		Number of slaves in group
 */
struct nfp_fl_lag_group {
	unsigned int group_id;
	u8 group_inst;
	struct list_head list;
	struct net_device *master_ndev;
	bool dirty;
	bool offloaded;
	bool to_remove;
	bool to_destroy;
	unsigned int slave_cnt;
};

#define NFP_FL_LAG_PKT_NUMBER_MASK	GENMASK(30, 0)
#define NFP_FL_LAG_VERSION_MASK		GENMASK(22, 0)
#define NFP_FL_LAG_HOST_TTL		0xff

/* Use this ID with zero members to ack a batch config */
#define NFP_FL_LAG_SYNC_ID		0
#define NFP_FL_LAG_GROUP_MIN		1 /* ID 0 reserved */
#define NFP_FL_LAG_GROUP_MAX		32 /* IDs 1 to 31 are valid */

/* wait for more config */
#define NFP_FL_LAG_DELAY		(msecs_to_jiffies(2))

#define NFP_FL_LAG_RETRANS_LIMIT	100 /* max retrans cmsgs to store */

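/* Packet numbers are a 31-bit rolling counter; masking handles the wrap
 * back to zero.
 */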
static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag)
{
	lag->pkt_num++;
	lag->pkt_num &= NFP_FL_LAG_PKT_NUMBER_MASK;

	return lag->pkt_num;
}

static void nfp_fl_increment_version(struct nfp_fl_lag *lag)
{
	/* LSB is not considered by firmware so add 2 for each increment. */
	lag->batch_ver += 2;
	lag->batch_ver &= NFP_FL_LAG_VERSION_MASK;

	/* Zero is reserved by firmware. */
	if (!lag->batch_ver)
		lag->batch_ver += 2;
}

static struct nfp_fl_lag_group *
nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
{
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;
	int id;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	id = ida_simple_get(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
			    NFP_FL_LAG_GROUP_MAX, GFP_KERNEL);
	if (id < 0) {
		nfp_flower_cmsg_warn(priv->app,
				     "No more bonding groups available\n");
		return ERR_PTR(id);
	}

	group = kmalloc(sizeof(*group), GFP_KERNEL);
	if (!group) {
		ida_simple_remove(&lag->ida_handle, id);
		return ERR_PTR(-ENOMEM);
	}

	group->group_id = id;
	group->master_ndev = master;
	group->dirty = true;
	group->offloaded = false;
	group->to_remove = false;
	group->to_destroy = false;
	group->slave_cnt = 0;
	group->group_inst = ++lag->global_inst;
	list_add_tail(&group->list, &lag->group_list);

	return group;
}

static struct nfp_fl_lag_group *
nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
					  struct net_device *master)
{
	struct nfp_fl_lag_group *entry;

	if (!master)
		return NULL;

	list_for_each_entry(entry, &lag->group_list, list)
		if (entry->master_ndev == master)
			return entry;

	return NULL;
}

static int nfp_fl_lag_get_group_info(struct nfp_app *app,
				     struct net_device *netdev,
				     __be16 *group_id,
				     u8 *batch_ver,
				     u8 *group_inst)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group = NULL;
	__be32 temp_vers;

	mutex_lock(&priv->nfp_lag.lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
							  netdev);
	if (!group) {
		mutex_unlock(&priv->nfp_lag.lock);
		return -ENOENT;
	}

	if (group_id)
		*group_id = cpu_to_be16(group->group_id);

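	/* The caller's version field is only 3 bytes wide; shift the batch
	 * version into place and copy the 3 most significant bytes of the
	 * big-endian value.
	 */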
	if (batch_ver) {
		temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver <<
					NFP_FL_PRE_LAG_VER_OFF);
		memcpy(batch_ver, &temp_vers, 3);
	}

	if (group_inst)
		*group_inst = group->group_inst;

	mutex_unlock(&priv->nfp_lag.lock);

	return 0;
}

int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
				       struct net_device *master,
				       struct nfp_fl_pre_lag *pre_act,
				       struct netlink_ext_ack *extack)
{
	if (nfp_fl_lag_get_group_info(app, master, &pre_act->group_id,
				      pre_act->lag_version,
				      &pre_act->instance)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: group does not exist for LAG action");
		return -ENOENT;
	}

	return 0;
}

void nfp_flower_lag_get_info_from_netdev(struct nfp_app *app,
					 struct net_device *netdev,
					 struct nfp_tun_neigh_lag *lag)
{
	nfp_fl_lag_get_group_info(app, netdev, NULL,
				  lag->lag_version, &lag->lag_instance);
}

int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group = NULL;
	int group_id = -ENOENT;

	mutex_lock(&priv->nfp_lag.lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
							  master);
	if (group)
		group_id = group->group_id;
	mutex_unlock(&priv->nfp_lag.lock);

	return group_id;
}

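/* Build and send one LAG config control message for @group. A member
 * count of zero deletes the group on the firmware side.
 */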
static int
nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
			struct net_device **active_members,
			unsigned int member_cnt, enum nfp_fl_lag_batch *batch)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;
	struct nfp_flower_priv *priv;
	unsigned long int flags;
	unsigned int size, i;
	struct sk_buff *skb;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
	size = sizeof(*cmsg_payload) + sizeof(__be32) * member_cnt;
	skb = nfp_flower_cmsg_alloc(priv->app, size,
				    NFP_FLOWER_CMSG_TYPE_LAG_CONFIG,
				    GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	flags = 0;

	/* Increment batch version for each new batch of config messages. */
	if (*batch == NFP_FL_LAG_BATCH_FIRST) {
		flags |= NFP_FL_LAG_FIRST;
		nfp_fl_increment_version(lag);
		*batch = NFP_FL_LAG_BATCH_MEMBER;
	}

	/* If it is a reset msg then it is also the end of the batch. */
	if (lag->rst_cfg) {
		flags |= NFP_FL_LAG_RESET;
		*batch = NFP_FL_LAG_BATCH_FINISHED;
	}

	/* To signal the end of a batch, both the switch and last flags are set
	 * and the reserved SYNC group ID is used.
	 */
	if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
		flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST;
		lag->rst_cfg = false;
		cmsg_payload->group_id = cpu_to_be32(NFP_FL_LAG_SYNC_ID);
		cmsg_payload->group_inst = 0;
	} else {
		cmsg_payload->group_id = cpu_to_be32(group->group_id);
		cmsg_payload->group_inst = cpu_to_be32(group->group_inst);
	}

	cmsg_payload->reserved[0] = 0;
	cmsg_payload->reserved[1] = 0;
	cmsg_payload->ttl = NFP_FL_LAG_HOST_TTL;
	cmsg_payload->ctrl_flags = flags;
	cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver);
	cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag));

	for (i = 0; i < member_cnt; i++)
		cmsg_payload->members[i] =
			cpu_to_be32(nfp_repr_get_port_id(active_members[i]));

	nfp_ctrl_tx(priv->app->ctrl, skb);
	return 0;
}

static void nfp_fl_lag_do_work(struct work_struct *work)
{
	enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;
	struct nfp_fl_lag_group *entry, *storage;
	struct delayed_work *delayed_work;
	struct nfp_flower_priv *priv;
	struct nfp_fl_lag *lag;
	int err;

	delayed_work = to_delayed_work(work);
	lag = container_of(delayed_work, struct nfp_fl_lag, work);
	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	mutex_lock(&lag->lock);
	list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
		struct net_device *iter_netdev, **acti_netdevs;
		struct nfp_flower_repr_priv *repr_priv;
		int active_count = 0, slaves = 0;
		struct nfp_repr *repr;
		unsigned long *flags;

		if (entry->to_remove) {
			/* Active count of 0 deletes group on hw. */
			err = nfp_fl_lag_config_group(lag, entry, NULL, 0,
						      &batch);
			if (!err) {
				entry->to_remove = false;
				entry->offloaded = false;
			} else {
				nfp_flower_cmsg_warn(priv->app,
						     "group delete failed\n");
				schedule_delayed_work(&lag->work,
						      NFP_FL_LAG_DELAY);
				continue;
			}

			if (entry->to_destroy) {
				ida_simple_remove(&lag->ida_handle,
						  entry->group_id);
				list_del(&entry->list);
				kfree(entry);
			}
			continue;
		}

		acti_netdevs = kmalloc_array(entry->slave_cnt,
					     sizeof(*acti_netdevs), GFP_KERNEL);
		if (!acti_netdevs) {
			schedule_delayed_work(&lag->work,
					      NFP_FL_LAG_DELAY);
			continue;
		}

		/* Include sanity check in the loop. It may be that a bond has
		 * changed between processing the last notification and the
		 * work queue triggering. If the number of slaves has changed
		 * or it now contains netdevs that cannot be offloaded, ignore
		 * the group until pending notifications are processed.
		 */
		rcu_read_lock();
		for_each_netdev_in_bond_rcu(entry->master_ndev, iter_netdev) {
			if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
				slaves = 0;
				break;
			}

			repr = netdev_priv(iter_netdev);

			if (repr->app != priv->app) {
				slaves = 0;
				break;
			}

			slaves++;
			if (slaves > entry->slave_cnt)
				break;

			/* Check the ports for state changes. */
			repr_priv = repr->app_priv;
			flags = &repr_priv->lag_port_flags;

			if (*flags & NFP_PORT_LAG_CHANGED) {
				*flags &= ~NFP_PORT_LAG_CHANGED;
				entry->dirty = true;
			}

			if ((*flags & NFP_PORT_LAG_TX_ENABLED) &&
			    (*flags & NFP_PORT_LAG_LINK_UP))
				acti_netdevs[active_count++] = iter_netdev;
		}
		rcu_read_unlock();

		if (slaves != entry->slave_cnt || !entry->dirty) {
			kfree(acti_netdevs);
			continue;
		}

		err = nfp_fl_lag_config_group(lag, entry, acti_netdevs,
					      active_count, &batch);
		if (!err) {
			entry->offloaded = true;
			entry->dirty = false;
		} else {
			nfp_flower_cmsg_warn(priv->app,
					     "group offload failed\n");
			schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
		}

		kfree(acti_netdevs);
	}

	/* End the config batch if at least one packet has been batched. */
	if (batch == NFP_FL_LAG_BATCH_MEMBER) {
		batch = NFP_FL_LAG_BATCH_FINISHED;
		err = nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
		if (err)
			nfp_flower_cmsg_warn(priv->app,
					     "group batch end cmsg failed\n");
	}

	mutex_unlock(&lag->lock);
}

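/* Config messages handed back by the firmware with the DATA flag set
 * are queued here for retransmission; a later XON message flushes the
 * queue back to the firmware (see nfp_flower_lag_unprocessed_msg()).
 */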
static int
nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	if (be32_to_cpu(cmsg_payload->group_id) >= NFP_FL_LAG_GROUP_MAX)
		return -EINVAL;

	/* Drop cmsg retrans if storage limit is exceeded to prevent
	 * overloading. If the fw notices that expected messages have not been
	 * received in a given time block, it will request a full resync.
	 */
	if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT)
		return -ENOSPC;

	__skb_queue_tail(&lag->retrans_skbs, skb);

	return 0;
}

static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag)
{
	struct nfp_flower_priv *priv;
	struct sk_buff *skb;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	while ((skb = __skb_dequeue(&lag->retrans_skbs)))
		nfp_ctrl_tx(priv->app->ctrl, skb);
}

bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group_entry;
	unsigned long int flags;
	bool store_skb = false;
	int err;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	flags = cmsg_payload->ctrl_flags;

	/* Note the intentional fall through below. If DATA and XON are both
	 * set, the message will be stored and sent again with the rest of the
	 * unprocessed messages list.
	 */

	/* Store */
	if (flags & NFP_FL_LAG_DATA)
		if (!nfp_fl_lag_put_unprocessed(&priv->nfp_lag, skb))
			store_skb = true;

	/* Send stored */
	if (flags & NFP_FL_LAG_XON)
		nfp_fl_send_unprocessed(&priv->nfp_lag);

	/* Resend all */
	if (flags & NFP_FL_LAG_SYNC) {
		/* To resend all config:
		 * 1) Clear all unprocessed messages
		 * 2) Mark all groups dirty
		 * 3) Reset NFP group config
		 * 4) Schedule a LAG config update
		 */

		__skb_queue_purge(&priv->nfp_lag.retrans_skbs);

		mutex_lock(&priv->nfp_lag.lock);
		list_for_each_entry(group_entry, &priv->nfp_lag.group_list,
				    list)
			group_entry->dirty = true;

		err = nfp_flower_lag_reset(&priv->nfp_lag);
		if (err)
			nfp_flower_cmsg_warn(priv->app,
					     "mem err in group reset msg\n");
		mutex_unlock(&priv->nfp_lag.lock);

		schedule_delayed_work(&priv->nfp_lag.work, 0);
	}

	return store_skb;
}

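/* Mark a group for removal from the firmware; the delayed work sends
 * the actual delete message.
 */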
static void
nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
				 struct nfp_fl_lag_group *group)
{
	group->to_remove = true;

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

static void
nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
				 struct net_device *master)
{
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	if (!netif_is_bond_master(master))
		return;

	mutex_lock(&lag->lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
	if (!group) {
		mutex_unlock(&lag->lock);
		nfp_warn(priv->app->cpp, "untracked bond got unregistered %s\n",
			 netdev_name(master));
		return;
	}

	group->to_remove = true;
	group->to_destroy = true;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

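/* Handle NETDEV_CHANGEUPPER on a bond: revalidate the slave set and
 * schedule an offload update, or schedule removal if the bond can no
 * longer be offloaded.
 */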
static int
nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
			     struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *iter_netdev;
	struct netdev_lag_upper_info *lag_upper_info;
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;
	unsigned int slave_count = 0;
	bool can_offload = true;
	struct nfp_repr *repr;

	if (!netif_is_lag_master(upper))
		return 0;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, iter_netdev) {
		if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
			can_offload = false;
			break;
		}
		repr = netdev_priv(iter_netdev);

		/* Ensure all ports are created by the same app/on the same
		 * card.
		 */
		if (repr->app != priv->app) {
			can_offload = false;
			break;
		}

		slave_count++;
	}
	rcu_read_unlock();

	lag_upper_info = info->upper_info;

	/* Firmware supports active/backup and L3/L4 hash bonds. */
	if (lag_upper_info &&
	    lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
	    (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
	     (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
	      lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 &&
	      lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) {
		can_offload = false;
		nfp_flower_cmsg_warn(priv->app,
				     "Unable to offload tx_type %u hash %u\n",
				     lag_upper_info->tx_type,
				     lag_upper_info->hash_type);
	}

	mutex_lock(&lag->lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper);

	if (slave_count == 0 || !can_offload) {
		/* Cannot offload the group - remove if previously offloaded. */
		if (group && group->offloaded)
			nfp_fl_lag_schedule_group_remove(lag, group);

		mutex_unlock(&lag->lock);
		return 0;
	}

	if (!group) {
		group = nfp_fl_lag_group_create(lag, upper);
		if (IS_ERR(group)) {
			mutex_unlock(&lag->lock);
			return PTR_ERR(group);
		}
	}

	group->dirty = true;
	group->slave_cnt = slave_count;

	/* Group may have been queued for removal but is now offloadable. */
	group->to_remove = false;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
	return 0;
}

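/* Handle NETDEV_CHANGELOWERSTATE on a bond port: record the link and
 * tx-enabled state so the next sync can recompute the active members.
 */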
static void
nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
			  struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_flower_priv *priv;
	struct nfp_repr *repr;
	unsigned long *flags;

	if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev))
		return;

	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
	repr = netdev_priv(netdev);

	/* Verify that the repr is associated with this app. */
	if (repr->app != priv->app)
		return;

	repr_priv = repr->app_priv;
	flags = &repr_priv->lag_port_flags;

	mutex_lock(&lag->lock);
	if (lag_lower_info->link_up)
		*flags |= NFP_PORT_LAG_LINK_UP;
	else
		*flags &= ~NFP_PORT_LAG_LINK_UP;

	if (lag_lower_info->tx_enabled)
		*flags |= NFP_PORT_LAG_TX_ENABLED;
	else
		*flags &= ~NFP_PORT_LAG_TX_ENABLED;

	*flags |= NFP_PORT_LAG_CHANGED;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

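/* Dispatch netdev notifier events relevant to LAG offload. */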
int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
				struct net_device *netdev,
				unsigned long event, void *ptr)
{
	struct nfp_fl_lag *lag = &priv->nfp_lag;
	int err;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = nfp_fl_lag_changeupper_event(lag, ptr);
		if (err)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case NETDEV_CHANGELOWERSTATE:
		nfp_fl_lag_changels_event(lag, netdev, ptr);
		return NOTIFY_OK;
	case NETDEV_UNREGISTER:
		nfp_fl_lag_schedule_group_delete(lag, netdev);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

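/* Ask the firmware to drop all LAG config; the reset message also
 * closes the current batch (see nfp_fl_lag_config_group()).
 */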
int nfp_flower_lag_reset(struct nfp_fl_lag *lag)
{
	enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;

	lag->rst_cfg = true;
	return nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
}

void nfp_flower_lag_init(struct nfp_fl_lag *lag)
{
	INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work);
	INIT_LIST_HEAD(&lag->group_list);
	mutex_init(&lag->lock);
	ida_init(&lag->ida_handle);

	__skb_queue_head_init(&lag->retrans_skbs);

	/* 0 is a reserved batch version so increment to first valid value. */
	nfp_fl_increment_version(lag);
}

void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
{
	struct nfp_fl_lag_group *entry, *storage;

	cancel_delayed_work_sync(&lag->work);

	__skb_queue_purge(&lag->retrans_skbs);

	/* Remove all groups. */
	mutex_lock(&lag->lock);
	list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
	mutex_unlock(&lag->lock);
	mutex_destroy(&lag->lock);
	ida_destroy(&lag->ida_handle);
}