// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include "main.h"

/* LAG group config flags. */
#define NFP_FL_LAG_LAST			BIT(1)
#define NFP_FL_LAG_FIRST		BIT(2)
#define NFP_FL_LAG_DATA			BIT(3)
#define NFP_FL_LAG_XON			BIT(4)
#define NFP_FL_LAG_SYNC			BIT(5)
#define NFP_FL_LAG_SWITCH		BIT(6)
#define NFP_FL_LAG_RESET		BIT(7)

/* LAG port state flags. */
#define NFP_PORT_LAG_LINK_UP		BIT(0)
#define NFP_PORT_LAG_TX_ENABLED		BIT(1)
#define NFP_PORT_LAG_CHANGED		BIT(2)

enum nfp_fl_lag_batch {
	NFP_FL_LAG_BATCH_FIRST,
	NFP_FL_LAG_BATCH_MEMBER,
	NFP_FL_LAG_BATCH_FINISHED
};
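
/* Group config is sent to firmware as a batch: the first message of a batch
 * increments the batch version, each subsequent message describes one group's
 * active members, and a final message carrying the switch/last flags and the
 * reserved sync ID closes the batch.
 */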

/**
 * struct nfp_flower_cmsg_lag_config - control message payload for LAG config
 * @ctrl_flags:	Configuration flags
 * @reserved:	Reserved for future use
 * @ttl:	Time to live of packet - host always sets to 0xff
 * @pkt_number:	Config message packet number - increment for each message
 * @batch_ver:	Batch version of messages - increment for each batch of messages
 * @group_id:	Group ID applicable
 * @group_inst:	Group instance number - increment when group is reused
 * @members:	Array of 32-bit words listing all active group members
 */
struct nfp_flower_cmsg_lag_config {
	u8 ctrl_flags;
	u8 reserved[2];
	u8 ttl;
	__be32 pkt_number;
	__be32 batch_ver;
	__be32 group_id;
	__be32 group_inst;
	__be32 members[];
};

/**
 * struct nfp_fl_lag_group - list entry for each LAG group
 * @group_id:		Assigned group ID for host/kernel sync
 * @group_inst:		Group instance in case of ID reuse
 * @list:		List entry
 * @master_ndev:	Group master Netdev
 * @dirty:		Marked if the group needs to be synced to HW
 * @offloaded:		Marked if the group is currently offloaded to NIC
 * @to_remove:		Marked if the group should be removed from NIC
 * @to_destroy:		Marked if the group should be removed from driver
 * @slave_cnt:		Number of slaves in group
 */
struct nfp_fl_lag_group {
	unsigned int group_id;
	u8 group_inst;
	struct list_head list;
	struct net_device *master_ndev;
	bool dirty;
	bool offloaded;
	bool to_remove;
	bool to_destroy;
	unsigned int slave_cnt;
};

#define NFP_FL_LAG_PKT_NUMBER_MASK	GENMASK(30, 0)
#define NFP_FL_LAG_VERSION_MASK		GENMASK(22, 0)
#define NFP_FL_LAG_HOST_TTL		0xff

/* Use this ID with zero members to ack a batch config */
#define NFP_FL_LAG_SYNC_ID		0
#define NFP_FL_LAG_GROUP_MIN		1 /* ID 0 reserved */
#define NFP_FL_LAG_GROUP_MAX		32 /* IDs 1 to 31 are valid */

/* wait for more config */
#define NFP_FL_LAG_DELAY		(msecs_to_jiffies(2))

#define NFP_FL_LAG_RETRANS_LIMIT	100 /* max retrans cmsgs to store */

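/* Packet numbers occupy a 31-bit sequence space and simply wrap on overflow;
 * the firmware uses them to notice config messages that were never received.
 */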
static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag)
{
	lag->pkt_num++;
	lag->pkt_num &= NFP_FL_LAG_PKT_NUMBER_MASK;

	return lag->pkt_num;
}

static void nfp_fl_increment_version(struct nfp_fl_lag *lag)
{
	/* LSB is not considered by firmware so add 2 for each increment. */
	lag->batch_ver += 2;
	lag->batch_ver &= NFP_FL_LAG_VERSION_MASK;

	/* Zero is reserved by firmware. */
	if (!lag->batch_ver)
		lag->batch_ver += 2;
}

static struct nfp_fl_lag_group *
nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
{
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;
	int id;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	id = ida_simple_get(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
			    NFP_FL_LAG_GROUP_MAX, GFP_KERNEL);
	if (id < 0) {
		nfp_flower_cmsg_warn(priv->app,
				     "No more bonding groups available\n");
		return ERR_PTR(id);
	}

	group = kmalloc(sizeof(*group), GFP_KERNEL);
	if (!group) {
		ida_simple_remove(&lag->ida_handle, id);
		return ERR_PTR(-ENOMEM);
	}

	group->group_id = id;
	group->master_ndev = master;
	group->dirty = true;
	group->offloaded = false;
	group->to_remove = false;
	group->to_destroy = false;
	group->slave_cnt = 0;
	group->group_inst = ++lag->global_inst;
	list_add_tail(&group->list, &lag->group_list);

	return group;
}

static struct nfp_fl_lag_group *
nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
					  struct net_device *master)
{
	struct nfp_fl_lag_group *entry;

	if (!master)
		return NULL;

	list_for_each_entry(entry, &lag->group_list, list)
		if (entry->master_ndev == master)
			return entry;

	return NULL;
}

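/* Fill the pre-lag action of an offloading flow with the bond's group ID,
 * instance and current batch version so the firmware can tie the rule to the
 * correct group config.
 */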
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
				       struct net_device *master,
				       struct nfp_fl_pre_lag *pre_act)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group = NULL;
	__be32 temp_vers;

	mutex_lock(&priv->nfp_lag.lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
							  master);
	if (!group) {
		mutex_unlock(&priv->nfp_lag.lock);
		return -ENOENT;
	}

	pre_act->group_id = cpu_to_be16(group->group_id);
	temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver <<
				NFP_FL_PRE_LAG_VER_OFF);
	memcpy(pre_act->lag_version, &temp_vers, 3);
	pre_act->instance = group->group_inst;
	mutex_unlock(&priv->nfp_lag.lock);

	return 0;
}

int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group = NULL;
	int group_id = -ENOENT;

	mutex_lock(&priv->nfp_lag.lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
							  master);
	if (group)
		group_id = group->group_id;
	mutex_unlock(&priv->nfp_lag.lock);

	return group_id;
}

static int
nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
			struct net_device **active_members,
			unsigned int member_cnt, enum nfp_fl_lag_batch *batch)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;
	struct nfp_flower_priv *priv;
	unsigned long int flags;
	unsigned int size, i;
	struct sk_buff *skb;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
	size = sizeof(*cmsg_payload) + sizeof(__be32) * member_cnt;
	skb = nfp_flower_cmsg_alloc(priv->app, size,
				    NFP_FLOWER_CMSG_TYPE_LAG_CONFIG,
				    GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	flags = 0;

	/* Increment batch version for each new batch of config messages. */
	if (*batch == NFP_FL_LAG_BATCH_FIRST) {
		flags |= NFP_FL_LAG_FIRST;
		nfp_fl_increment_version(lag);
		*batch = NFP_FL_LAG_BATCH_MEMBER;
	}

	/* If it is a reset msg then it is also the end of the batch. */
	if (lag->rst_cfg) {
		flags |= NFP_FL_LAG_RESET;
		*batch = NFP_FL_LAG_BATCH_FINISHED;
	}

	/* To signal the end of a batch, both the switch and last flags are set
	 * and the reserved SYNC group ID is used.
	 */
	if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
		flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST;
		lag->rst_cfg = false;
		cmsg_payload->group_id = cpu_to_be32(NFP_FL_LAG_SYNC_ID);
		cmsg_payload->group_inst = 0;
	} else {
		cmsg_payload->group_id = cpu_to_be32(group->group_id);
		cmsg_payload->group_inst = cpu_to_be32(group->group_inst);
	}

	cmsg_payload->reserved[0] = 0;
	cmsg_payload->reserved[1] = 0;
	cmsg_payload->ttl = NFP_FL_LAG_HOST_TTL;
	cmsg_payload->ctrl_flags = flags;
	cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver);
	cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag));

	for (i = 0; i < member_cnt; i++)
		cmsg_payload->members[i] =
			cpu_to_be32(nfp_repr_get_port_id(active_members[i]));

	nfp_ctrl_tx(priv->app->ctrl, skb);
	return 0;
}

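/* Delayed worker: walk the group list and push any dirty or to-be-removed
 * groups to the firmware as a single config batch.
 */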
static void nfp_fl_lag_do_work(struct work_struct *work)
{
	enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;
	struct nfp_fl_lag_group *entry, *storage;
	struct delayed_work *delayed_work;
	struct nfp_flower_priv *priv;
	struct nfp_fl_lag *lag;
	int err;

	delayed_work = to_delayed_work(work);
	lag = container_of(delayed_work, struct nfp_fl_lag, work);
	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	mutex_lock(&lag->lock);
	list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
		struct net_device *iter_netdev, **acti_netdevs;
		struct nfp_flower_repr_priv *repr_priv;
		int active_count = 0, slaves = 0;
		struct nfp_repr *repr;
		unsigned long *flags;

		if (entry->to_remove) {
			/* Active count of 0 deletes group on hw. */
			err = nfp_fl_lag_config_group(lag, entry, NULL, 0,
						      &batch);
			if (!err) {
				entry->to_remove = false;
				entry->offloaded = false;
			} else {
				nfp_flower_cmsg_warn(priv->app,
						     "group delete failed\n");
				schedule_delayed_work(&lag->work,
						      NFP_FL_LAG_DELAY);
				continue;
			}

			if (entry->to_destroy) {
				ida_simple_remove(&lag->ida_handle,
						  entry->group_id);
				list_del(&entry->list);
				kfree(entry);
			}
			continue;
		}

		acti_netdevs = kmalloc_array(entry->slave_cnt,
					     sizeof(*acti_netdevs), GFP_KERNEL);
		if (!acti_netdevs) {
			schedule_delayed_work(&lag->work,
					      NFP_FL_LAG_DELAY);
			continue;
		}

		/* Include sanity check in the loop. It may be that a bond has
		 * changed between processing the last notification and the
		 * work queue triggering. If the number of slaves has changed
		 * or it now contains netdevs that cannot be offloaded, ignore
		 * the group until pending notifications are processed.
		 */
		rcu_read_lock();
		for_each_netdev_in_bond_rcu(entry->master_ndev, iter_netdev) {
			if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
				slaves = 0;
				break;
			}

			repr = netdev_priv(iter_netdev);

			if (repr->app != priv->app) {
				slaves = 0;
				break;
			}

			slaves++;
			if (slaves > entry->slave_cnt)
				break;

			/* Check the ports for state changes. */
			repr_priv = repr->app_priv;
			flags = &repr_priv->lag_port_flags;

			if (*flags & NFP_PORT_LAG_CHANGED) {
				*flags &= ~NFP_PORT_LAG_CHANGED;
				entry->dirty = true;
			}

			if ((*flags & NFP_PORT_LAG_TX_ENABLED) &&
			    (*flags & NFP_PORT_LAG_LINK_UP))
				acti_netdevs[active_count++] = iter_netdev;
		}
		rcu_read_unlock();

		if (slaves != entry->slave_cnt || !entry->dirty) {
			kfree(acti_netdevs);
			continue;
		}

		err = nfp_fl_lag_config_group(lag, entry, acti_netdevs,
					      active_count, &batch);
		if (!err) {
			entry->offloaded = true;
			entry->dirty = false;
		} else {
			nfp_flower_cmsg_warn(priv->app,
					     "group offload failed\n");
			schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
		}

		kfree(acti_netdevs);
	}

	/* End the config batch if at least one packet has been batched. */
	if (batch == NFP_FL_LAG_BATCH_MEMBER) {
		batch = NFP_FL_LAG_BATCH_FINISHED;
		err = nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
		if (err)
			nfp_flower_cmsg_warn(priv->app,
					     "group batch end cmsg failed\n");
	}

	mutex_unlock(&lag->lock);
}

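/* Queue a firmware LAG cmsg marked with the DATA flag for retransmission; it
 * is replayed once an XON is received, or purged on a full SYNC.
 */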
static int
nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	if (be32_to_cpu(cmsg_payload->group_id) >= NFP_FL_LAG_GROUP_MAX)
		return -EINVAL;

	/* Drop cmsg retrans if storage limit is exceeded to prevent
	 * overloading. If the fw notices that expected messages have not been
	 * received in a given time block, it will request a full resync.
	 */
	if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT)
		return -ENOSPC;

	__skb_queue_tail(&lag->retrans_skbs, skb);

	return 0;
}

static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag)
{
	struct nfp_flower_priv *priv;
	struct sk_buff *skb;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	while ((skb = __skb_dequeue(&lag->retrans_skbs)))
		nfp_ctrl_tx(priv->app->ctrl, skb);
}

bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group_entry;
	unsigned long int flags;
	bool store_skb = false;
	int err;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	flags = cmsg_payload->ctrl_flags;

	/* Note that the flag checks below are not mutually exclusive. If DATA
	 * and XON are both set, the message will be stored and then sent again
	 * along with the rest of the unprocessed message list.
	 */

	/* Store */
	if (flags & NFP_FL_LAG_DATA)
		if (!nfp_fl_lag_put_unprocessed(&priv->nfp_lag, skb))
			store_skb = true;

	/* Send stored */
	if (flags & NFP_FL_LAG_XON)
		nfp_fl_send_unprocessed(&priv->nfp_lag);

	/* Resend all */
	if (flags & NFP_FL_LAG_SYNC) {
		/* To resend all config:
		 * 1) Clear all unprocessed messages
		 * 2) Mark all groups dirty
		 * 3) Reset NFP group config
		 * 4) Schedule a LAG config update
		 */

		__skb_queue_purge(&priv->nfp_lag.retrans_skbs);

		mutex_lock(&priv->nfp_lag.lock);
		list_for_each_entry(group_entry, &priv->nfp_lag.group_list,
				    list)
			group_entry->dirty = true;

		err = nfp_flower_lag_reset(&priv->nfp_lag);
		if (err)
			nfp_flower_cmsg_warn(priv->app,
					     "mem err in group reset msg\n");
		mutex_unlock(&priv->nfp_lag.lock);

		schedule_delayed_work(&priv->nfp_lag.work, 0);
	}

	return store_skb;
}

static void
nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
				 struct nfp_fl_lag_group *group)
{
	group->to_remove = true;

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

static int
nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
				 struct net_device *master)
{
	struct nfp_fl_lag_group *group;

	mutex_lock(&lag->lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
	if (!group) {
		mutex_unlock(&lag->lock);
		return -ENOENT;
	}

	group->to_remove = true;
	group->to_destroy = true;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
	return 0;
}

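/* Handle a CHANGEUPPER event on a bond: check that every slave is an nfp repr
 * belonging to this app and that the TX policy is one the firmware supports,
 * then create or update the corresponding offload group.
 */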
static int
nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
			     struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *iter_netdev;
	struct netdev_lag_upper_info *lag_upper_info;
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;
	unsigned int slave_count = 0;
	bool can_offload = true;
	struct nfp_repr *repr;

	if (!netif_is_lag_master(upper))
		return 0;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, iter_netdev) {
		if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
			can_offload = false;
			break;
		}
		repr = netdev_priv(iter_netdev);

		/* Ensure all ports are created by the same app/on same card. */
		if (repr->app != priv->app) {
			can_offload = false;
			break;
		}

		slave_count++;
	}
	rcu_read_unlock();

	lag_upper_info = info->upper_info;

	/* Firmware supports active/backup and L3/L4 hash bonds. */
	if (lag_upper_info &&
	    lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
	    (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
	     (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
	      lag_upper_info->hash_type != NETDEV_LAG_HASH_E34 &&
	      lag_upper_info->hash_type != NETDEV_LAG_HASH_UNKNOWN))) {
		can_offload = false;
		nfp_flower_cmsg_warn(priv->app,
				     "Unable to offload tx_type %u hash %u\n",
				     lag_upper_info->tx_type,
				     lag_upper_info->hash_type);
	}

	mutex_lock(&lag->lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper);

	if (slave_count == 0 || !can_offload) {
		/* Cannot offload the group - remove if previously offloaded. */
		if (group && group->offloaded)
			nfp_fl_lag_schedule_group_remove(lag, group);

		mutex_unlock(&lag->lock);
		return 0;
	}

	if (!group) {
		group = nfp_fl_lag_group_create(lag, upper);
		if (IS_ERR(group)) {
			mutex_unlock(&lag->lock);
			return PTR_ERR(group);
		}
	}

	group->dirty = true;
	group->slave_cnt = slave_count;

	/* Group may have been on queue for removal but is now offloadable. */
	group->to_remove = false;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
	return 0;
}

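/* Track link and TX-enable state changes reported for bond slaves; the
 * delayed worker picks up the CHANGED flag and resyncs the group with FW.
 */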
static int
nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
			  struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_flower_priv *priv;
	struct nfp_repr *repr;
	unsigned long *flags;

	if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev))
		return 0;

	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return 0;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
	repr = netdev_priv(netdev);

	/* Verify that the repr is associated with this app. */
	if (repr->app != priv->app)
		return 0;

	repr_priv = repr->app_priv;
	flags = &repr_priv->lag_port_flags;

	mutex_lock(&lag->lock);
	if (lag_lower_info->link_up)
		*flags |= NFP_PORT_LAG_LINK_UP;
	else
		*flags &= ~NFP_PORT_LAG_LINK_UP;

	if (lag_lower_info->tx_enabled)
		*flags |= NFP_PORT_LAG_TX_ENABLED;
	else
		*flags &= ~NFP_PORT_LAG_TX_ENABLED;

	*flags |= NFP_PORT_LAG_CHANGED;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
	return 0;
}

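/* Netdev notifier entry point: dispatch bond-related events to the handlers
 * above and schedule group deletion when a bond master is unregistered.
 */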
static int
nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event,
			void *ptr)
{
	struct net_device *netdev;
	struct nfp_fl_lag *lag;
	int err;

	netdev = netdev_notifier_info_to_dev(ptr);
	lag = container_of(nb, struct nfp_fl_lag, lag_nb);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = nfp_fl_lag_changeupper_event(lag, ptr);
		if (err)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case NETDEV_CHANGELOWERSTATE:
		err = nfp_fl_lag_changels_event(lag, netdev, ptr);
		if (err)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case NETDEV_UNREGISTER:
		if (netif_is_bond_master(netdev)) {
			err = nfp_fl_lag_schedule_group_delete(lag, netdev);
			if (err)
				return NOTIFY_BAD;
			return NOTIFY_OK;
		}
	}

	return NOTIFY_DONE;
}

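/* Flag the next config message as a reset so the firmware clears all of its
 * group state before the groups are resynced.
 */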
int nfp_flower_lag_reset(struct nfp_fl_lag *lag)
{
	enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;

	lag->rst_cfg = true;
	return nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
}

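/* Initialise LAG state: the delayed worker, group list, ID allocator and
 * retransmit queue. The notifier block is set up here; registering it is
 * left to the caller.
 */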
void nfp_flower_lag_init(struct nfp_fl_lag *lag)
{
	INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work);
	INIT_LIST_HEAD(&lag->group_list);
	mutex_init(&lag->lock);
	ida_init(&lag->ida_handle);

	__skb_queue_head_init(&lag->retrans_skbs);

	/* 0 is a reserved batch version so increment to first valid value. */
	nfp_fl_increment_version(lag);

	lag->lag_nb.notifier_call = nfp_fl_lag_netdev_event;
}

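/* Tear down LAG state: stop the worker, drop any queued retransmissions and
 * free all remaining groups.
 */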
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
{
	struct nfp_fl_lag_group *entry, *storage;

	cancel_delayed_work_sync(&lag->work);

	__skb_queue_purge(&lag->retrans_skbs);

	/* Remove all groups. */
	mutex_lock(&lag->lock);
	list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
	mutex_unlock(&lag->lock);
	mutex_destroy(&lag->lock);
	ida_destroy(&lag->ida_handle);
}