/*
 * Copyright (C) 2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "main.h"

/* LAG group config flags. */
#define NFP_FL_LAG_LAST			BIT(1)
#define NFP_FL_LAG_FIRST		BIT(2)
#define NFP_FL_LAG_DATA			BIT(3)
#define NFP_FL_LAG_XON			BIT(4)
#define NFP_FL_LAG_SYNC			BIT(5)
#define NFP_FL_LAG_SWITCH		BIT(6)
#define NFP_FL_LAG_RESET		BIT(7)

/* LAG port state flags. */
#define NFP_PORT_LAG_LINK_UP		BIT(0)
#define NFP_PORT_LAG_TX_ENABLED		BIT(1)
#define NFP_PORT_LAG_CHANGED		BIT(2)

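/* Position of a config message within a batch sequence. */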
enum nfp_fl_lag_batch {
	NFP_FL_LAG_BATCH_FIRST,
	NFP_FL_LAG_BATCH_MEMBER,
	NFP_FL_LAG_BATCH_FINISHED
};

/**
 * struct nfp_flower_cmsg_lag_config - control message payload for LAG config
 * @ctrl_flags:	Configuration flags
 * @reserved:	Reserved for future use
 * @ttl:	Time to live of packet - host always sets to 0xff
 * @pkt_number:	Config message packet number - increment for each message
 * @batch_ver:	Batch version of messages - increment for each batch of messages
 * @group_id:	Group ID applicable
 * @group_inst:	Group instance number - increment when group is reused
 * @members:	Array of 32-bit words listing all active group members
 */
struct nfp_flower_cmsg_lag_config {
	u8 ctrl_flags;
	u8 reserved[2];
	u8 ttl;
	__be32 pkt_number;
	__be32 batch_ver;
	__be32 group_id;
	__be32 group_inst;
	__be32 members[];
};

/**
 * struct nfp_fl_lag_group - list entry for each LAG group
 * @group_id:		Assigned group ID for host/kernel sync
 * @group_inst:		Group instance in case of ID reuse
 * @list:		List entry
 * @master_ndev:	Group master Netdev
 * @dirty:		Marked if the group needs to be synced to HW
 * @offloaded:		Marked if the group is currently offloaded to NIC
 * @to_remove:		Marked if the group should be removed from NIC
 * @to_destroy:		Marked if the group should be removed from driver
 * @slave_cnt:		Number of slaves in group
 */
struct nfp_fl_lag_group {
	unsigned int group_id;
	u8 group_inst;
	struct list_head list;
	struct net_device *master_ndev;
	bool dirty;
	bool offloaded;
	bool to_remove;
	bool to_destroy;
	unsigned int slave_cnt;
};

#define NFP_FL_LAG_PKT_NUMBER_MASK	GENMASK(30, 0)
#define NFP_FL_LAG_VERSION_MASK		GENMASK(22, 0)
#define NFP_FL_LAG_HOST_TTL		0xff

/* Use this ID with zero members to ack a batch config */
#define NFP_FL_LAG_SYNC_ID		0
#define NFP_FL_LAG_GROUP_MIN		1 /* ID 0 reserved */
#define NFP_FL_LAG_GROUP_MAX		32 /* IDs 1 to 31 are valid */

/* wait for more config */
#define NFP_FL_LAG_DELAY		(msecs_to_jiffies(2))

#define NFP_FL_LAG_RETRANS_LIMIT	100 /* max retrans cmsgs to store */

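/* Advance and return the config message packet number, wrapping within
 * NFP_FL_LAG_PKT_NUMBER_MASK.
 */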
static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag)
{
	lag->pkt_num++;
	lag->pkt_num &= NFP_FL_LAG_PKT_NUMBER_MASK;

	return lag->pkt_num;
}

static void nfp_fl_increment_version(struct nfp_fl_lag *lag)
{
	/* LSB is not considered by firmware so add 2 for each increment. */
	lag->batch_ver += 2;
	lag->batch_ver &= NFP_FL_LAG_VERSION_MASK;

	/* Zero is reserved by firmware. */
	if (!lag->batch_ver)
		lag->batch_ver += 2;
}

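/* Allocate a group ID and tracking entry for a new bond and add it to
 * the group list. Called with lag->lock held.
 */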
static struct nfp_fl_lag_group *
nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master)
{
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;
	int id;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	id = ida_simple_get(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN,
			    NFP_FL_LAG_GROUP_MAX, GFP_KERNEL);
	if (id < 0) {
		nfp_flower_cmsg_warn(priv->app,
				     "No more bonding groups available\n");
		return ERR_PTR(id);
	}

	group = kmalloc(sizeof(*group), GFP_KERNEL);
	if (!group) {
		ida_simple_remove(&lag->ida_handle, id);
		return ERR_PTR(-ENOMEM);
	}

	group->group_id = id;
	group->master_ndev = master;
	group->dirty = true;
	group->offloaded = false;
	group->to_remove = false;
	group->to_destroy = false;
	group->slave_cnt = 0;
	group->group_inst = ++lag->global_inst;
	list_add_tail(&group->list, &lag->group_list);

	return group;
}

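/* Return the group whose master netdev is @master, or NULL if the bond
 * is not tracked. Called with lag->lock held.
 */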
static struct nfp_fl_lag_group *
nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag,
					  struct net_device *master)
{
	struct nfp_fl_lag_group *entry;

	if (!master)
		return NULL;

	list_for_each_entry(entry, &lag->group_list, list)
		if (entry->master_ndev == master)
			return entry;

	return NULL;
}

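/* Fill a pre-lag action with the group ID, batch version and group
 * instance of the group offloaded for @master.
 */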
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
				       struct net_device *master,
				       struct nfp_fl_pre_lag *pre_act)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group = NULL;
	__be32 temp_vers;

	mutex_lock(&priv->nfp_lag.lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
							  master);
	if (!group) {
		mutex_unlock(&priv->nfp_lag.lock);
		return -ENOENT;
	}

	pre_act->group_id = cpu_to_be16(group->group_id);
	temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver <<
				NFP_FL_PRE_LAG_VER_OFF);
	memcpy(pre_act->lag_version, &temp_vers, 3);
	pre_act->instance = group->group_inst;
	mutex_unlock(&priv->nfp_lag.lock);

	return 0;
}

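/* Look up the offload group ID for @master. Returns -ENOENT if the
 * bond is not tracked.
 */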
int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group = NULL;
	int group_id = -ENOENT;

	mutex_lock(&priv->nfp_lag.lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag,
							  master);
	if (group)
		group_id = group->group_id;
	mutex_unlock(&priv->nfp_lag.lock);

	return group_id;
}

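/* Build and send one LAG config cmsg for @group, listing its currently
 * active members. Batch-end and reset messages carry the reserved SYNC
 * group ID and no members.
 */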
static int
nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group,
			struct net_device **active_members,
			unsigned int member_cnt, enum nfp_fl_lag_batch *batch)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;
	struct nfp_flower_priv *priv;
	unsigned long flags;
	unsigned int size, i;
	struct sk_buff *skb;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
	size = sizeof(*cmsg_payload) + sizeof(__be32) * member_cnt;
	skb = nfp_flower_cmsg_alloc(priv->app, size,
				    NFP_FLOWER_CMSG_TYPE_LAG_CONFIG,
				    GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	flags = 0;

	/* Increment batch version for each new batch of config messages. */
	if (*batch == NFP_FL_LAG_BATCH_FIRST) {
		flags |= NFP_FL_LAG_FIRST;
		nfp_fl_increment_version(lag);
		*batch = NFP_FL_LAG_BATCH_MEMBER;
	}

	/* If it is a reset msg then it is also the end of the batch. */
	if (lag->rst_cfg) {
		flags |= NFP_FL_LAG_RESET;
		*batch = NFP_FL_LAG_BATCH_FINISHED;
	}

	/* To signal the end of a batch, both the switch and last flags are set
	 * and the reserved SYNC group ID is used.
	 */
	if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
		flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST;
		lag->rst_cfg = false;
		cmsg_payload->group_id = cpu_to_be32(NFP_FL_LAG_SYNC_ID);
		cmsg_payload->group_inst = 0;
	} else {
		cmsg_payload->group_id = cpu_to_be32(group->group_id);
		cmsg_payload->group_inst = cpu_to_be32(group->group_inst);
	}

	cmsg_payload->reserved[0] = 0;
	cmsg_payload->reserved[1] = 0;
	cmsg_payload->ttl = NFP_FL_LAG_HOST_TTL;
	cmsg_payload->ctrl_flags = flags;
	cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver);
	cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag));

	for (i = 0; i < member_cnt; i++)
		cmsg_payload->members[i] =
			cpu_to_be32(nfp_repr_get_port_id(active_members[i]));

	nfp_ctrl_tx(priv->app->ctrl, skb);
	return 0;
}

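/* Delayed work that walks the group list, deleting groups scheduled
 * for removal and syncing dirty groups to firmware as a config batch.
 */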
static void nfp_fl_lag_do_work(struct work_struct *work)
{
	enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;
	struct nfp_fl_lag_group *entry, *storage;
	struct delayed_work *delayed_work;
	struct nfp_flower_priv *priv;
	struct nfp_fl_lag *lag;
	int err;

	delayed_work = to_delayed_work(work);
	lag = container_of(delayed_work, struct nfp_fl_lag, work);
	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	mutex_lock(&lag->lock);
	list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
		struct net_device *iter_netdev, **acti_netdevs;
		struct nfp_flower_repr_priv *repr_priv;
		int active_count = 0, slaves = 0;
		struct nfp_repr *repr;
		unsigned long *flags;

		if (entry->to_remove) {
			/* Active count of 0 deletes group on hw. */
			err = nfp_fl_lag_config_group(lag, entry, NULL, 0,
						      &batch);
			if (!err) {
				entry->to_remove = false;
				entry->offloaded = false;
			} else {
				nfp_flower_cmsg_warn(priv->app,
						     "group delete failed\n");
				schedule_delayed_work(&lag->work,
						      NFP_FL_LAG_DELAY);
				continue;
			}

			if (entry->to_destroy) {
				ida_simple_remove(&lag->ida_handle,
						  entry->group_id);
				list_del(&entry->list);
				kfree(entry);
			}
			continue;
		}

		acti_netdevs = kmalloc_array(entry->slave_cnt,
					     sizeof(*acti_netdevs), GFP_KERNEL);
		if (!acti_netdevs) {
			/* Retry the group later if memory is unavailable. */
			schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
			continue;
		}

		/* Include sanity check in the loop. It may be that a bond has
		 * changed between processing the last notification and the
		 * work queue triggering. If the number of slaves has changed
		 * or it now contains netdevs that cannot be offloaded, ignore
		 * the group until pending notifications are processed.
		 */
		rcu_read_lock();
		for_each_netdev_in_bond_rcu(entry->master_ndev, iter_netdev) {
			if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
				slaves = 0;
				break;
			}

			repr = netdev_priv(iter_netdev);

			if (repr->app != priv->app) {
				slaves = 0;
				break;
			}

			slaves++;
			if (slaves > entry->slave_cnt)
				break;

			/* Check the ports for state changes. */
			repr_priv = repr->app_priv;
			flags = &repr_priv->lag_port_flags;

			if (*flags & NFP_PORT_LAG_CHANGED) {
				*flags &= ~NFP_PORT_LAG_CHANGED;
				entry->dirty = true;
			}

			if ((*flags & NFP_PORT_LAG_TX_ENABLED) &&
			    (*flags & NFP_PORT_LAG_LINK_UP))
				acti_netdevs[active_count++] = iter_netdev;
		}
		rcu_read_unlock();

		if (slaves != entry->slave_cnt || !entry->dirty) {
			kfree(acti_netdevs);
			continue;
		}

		err = nfp_fl_lag_config_group(lag, entry, acti_netdevs,
					      active_count, &batch);
		if (!err) {
			entry->offloaded = true;
			entry->dirty = false;
		} else {
			nfp_flower_cmsg_warn(priv->app,
					     "group offload failed\n");
			schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
		}

		kfree(acti_netdevs);
	}

	/* End the config batch if at least one packet has been batched. */
	if (batch == NFP_FL_LAG_BATCH_MEMBER) {
		batch = NFP_FL_LAG_BATCH_FINISHED;
		err = nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
		if (err)
			nfp_flower_cmsg_warn(priv->app,
					     "group batch end cmsg failed\n");
	}

	mutex_unlock(&lag->lock);
}

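/* Queue a data cmsg from firmware for retransmission once an XON is
 * received.
 */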
static int
nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	if (be32_to_cpu(cmsg_payload->group_id) >= NFP_FL_LAG_GROUP_MAX)
		return -EINVAL;

	/* Drop cmsg retrans if storage limit is exceeded to prevent
	 * overloading. If the fw notices that expected messages have not been
	 * received in a given time block, it will request a full resync.
	 */
	if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT)
		return -ENOSPC;

	__skb_queue_tail(&lag->retrans_skbs, skb);

	return 0;
}

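/* Flush all stored cmsgs back to firmware. */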
static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag)
{
	struct nfp_flower_priv *priv;
	struct sk_buff *skb;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	while ((skb = __skb_dequeue(&lag->retrans_skbs)))
		nfp_ctrl_tx(priv->app->ctrl, skb);
}

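/* Handle a LAG cmsg from firmware. Returns true if @skb was stored for
 * retransmission and must not be freed by the caller.
 */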
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_lag_config *cmsg_payload;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_lag_group *group_entry;
	unsigned long flags;
	bool store_skb = false;
	int err;

	cmsg_payload = nfp_flower_cmsg_get_data(skb);
	flags = cmsg_payload->ctrl_flags;

	/* Note the intentional fall through below. If DATA and XON are both
	 * set, the message will be stored and then sent again along with the
	 * rest of the unprocessed messages.
	 */

	/* Store */
	if (flags & NFP_FL_LAG_DATA)
		if (!nfp_fl_lag_put_unprocessed(&priv->nfp_lag, skb))
			store_skb = true;

	/* Send stored */
	if (flags & NFP_FL_LAG_XON)
		nfp_fl_send_unprocessed(&priv->nfp_lag);

	/* Resend all */
	if (flags & NFP_FL_LAG_SYNC) {
		/* To resend all config:
		 * 1) Clear all unprocessed messages
		 * 2) Mark all groups dirty
		 * 3) Reset NFP group config
		 * 4) Schedule a LAG config update
		 */

		__skb_queue_purge(&priv->nfp_lag.retrans_skbs);

		mutex_lock(&priv->nfp_lag.lock);
		list_for_each_entry(group_entry, &priv->nfp_lag.group_list,
				    list)
			group_entry->dirty = true;

		err = nfp_flower_lag_reset(&priv->nfp_lag);
		if (err)
			nfp_flower_cmsg_warn(priv->app,
					     "mem err in group reset msg\n");
		mutex_unlock(&priv->nfp_lag.lock);

		schedule_delayed_work(&priv->nfp_lag.work, 0);
	}

	return store_skb;
}

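/* Mark @group for removal from firmware and kick the config work.
 * Called with lag->lock held.
 */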
static void
nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
				 struct nfp_fl_lag_group *group)
{
	group->to_remove = true;

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}

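/* Schedule removal of the group for @master from both firmware and the
 * driver's group tracking.
 */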
static int
nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
				 struct net_device *master)
{
	struct nfp_fl_lag_group *group;

	mutex_lock(&lag->lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
	if (!group) {
		mutex_unlock(&lag->lock);
		return -ENOENT;
	}

	group->to_remove = true;
	group->to_destroy = true;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
	return 0;
}

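/* Handle a CHANGEUPPER event on a bond: validate that every slave is a
 * repr on the same app and that the bond mode is supported, then create
 * or update the group and schedule a config sync.
 */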
static int
nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
			     struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *iter_netdev;
	struct netdev_lag_upper_info *lag_upper_info;
	struct nfp_fl_lag_group *group;
	struct nfp_flower_priv *priv;
	unsigned int slave_count = 0;
	bool can_offload = true;
	struct nfp_repr *repr;

	if (!netif_is_lag_master(upper))
		return 0;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, iter_netdev) {
		if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
			can_offload = false;
			break;
		}
		repr = netdev_priv(iter_netdev);

		/* Ensure all ports are created by the same app/on same card. */
		if (repr->app != priv->app) {
			can_offload = false;
			break;
		}

		slave_count++;
	}
	rcu_read_unlock();

	lag_upper_info = info->upper_info;

	/* Firmware supports active/backup and L3/L4 hash bonds. */
	if (lag_upper_info &&
	    lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
	    (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
	    (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
	    lag_upper_info->hash_type != NETDEV_LAG_HASH_E34))) {
		can_offload = false;
		nfp_flower_cmsg_warn(priv->app,
				     "Unable to offload tx_type %u hash %u\n",
				     lag_upper_info->tx_type,
				     lag_upper_info->hash_type);
	}

	mutex_lock(&lag->lock);
	group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper);

	if (slave_count == 0 || !can_offload) {
		/* Cannot offload the group - remove if previously offloaded. */
		if (group && group->offloaded)
			nfp_fl_lag_schedule_group_remove(lag, group);

		mutex_unlock(&lag->lock);
		return 0;
	}

	if (!group) {
		group = nfp_fl_lag_group_create(lag, upper);
		if (IS_ERR(group)) {
			mutex_unlock(&lag->lock);
			return PTR_ERR(group);
		}
	}

	group->dirty = true;
	group->slave_cnt = slave_count;

	/* Group may have been queued for removal but is now offloadable. */
	group->to_remove = false;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
	return 0;
}

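/* Handle a CHANGELOWERSTATE event on a repr slave: record link and
 * tx-enabled state in the port's LAG flags and schedule a config sync.
 */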
static int
nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
			  struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_flower_priv *priv;
	struct nfp_repr *repr;
	unsigned long *flags;

	if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev))
		return 0;

	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return 0;

	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
	repr = netdev_priv(netdev);

	/* Verify that the repr is associated with this app. */
	if (repr->app != priv->app)
		return 0;

	repr_priv = repr->app_priv;
	flags = &repr_priv->lag_port_flags;

	mutex_lock(&lag->lock);
	if (lag_lower_info->link_up)
		*flags |= NFP_PORT_LAG_LINK_UP;
	else
		*flags &= ~NFP_PORT_LAG_LINK_UP;

	if (lag_lower_info->tx_enabled)
		*flags |= NFP_PORT_LAG_TX_ENABLED;
	else
		*flags &= ~NFP_PORT_LAG_TX_ENABLED;

	*flags |= NFP_PORT_LAG_CHANGED;
	mutex_unlock(&lag->lock);

	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
	return 0;
}

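/* Notifier callback dispatching the netdev events relevant to LAG
 * offload.
 */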
static int
nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event,
			void *ptr)
{
	struct net_device *netdev;
	struct nfp_fl_lag *lag;
	int err;

	netdev = netdev_notifier_info_to_dev(ptr);
	lag = container_of(nb, struct nfp_fl_lag, lag_nb);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = nfp_fl_lag_changeupper_event(lag, ptr);
		if (err)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case NETDEV_CHANGELOWERSTATE:
		err = nfp_fl_lag_changels_event(lag, netdev, ptr);
		if (err)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case NETDEV_UNREGISTER:
		if (netif_is_bond_master(netdev)) {
			err = nfp_fl_lag_schedule_group_delete(lag, netdev);
			if (err)
				return NOTIFY_BAD;
			return NOTIFY_OK;
		}
	}

	return NOTIFY_DONE;
}

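/* Send a reset cmsg, instructing firmware to clear all LAG config. */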
int nfp_flower_lag_reset(struct nfp_fl_lag *lag)
{
	enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;

	lag->rst_cfg = true;
	return nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
}

void nfp_flower_lag_init(struct nfp_fl_lag *lag)
{
	INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work);
	INIT_LIST_HEAD(&lag->group_list);
	mutex_init(&lag->lock);
	ida_init(&lag->ida_handle);

	__skb_queue_head_init(&lag->retrans_skbs);

	/* 0 is a reserved batch version so increment to first valid value. */
	nfp_fl_increment_version(lag);

	lag->lag_nb.notifier_call = nfp_fl_lag_netdev_event;
}

void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
{
	struct nfp_fl_lag_group *entry, *storage;

	cancel_delayed_work_sync(&lag->work);

	__skb_queue_purge(&lag->retrans_skbs);

	/* Remove all groups. */
	mutex_lock(&lag->lock);
	list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
	mutex_unlock(&lag->lock);
	mutex_destroy(&lag->lock);
	ida_destroy(&lag->ida_handle);
}