// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */

/* Link Aggregation code */

#include "ice.h"
#include "ice_lib.h"
#include "ice_lag.h"

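/* resource-type flags OR'd into the SWID res_type when subscribing to the
 * primary interface's shared SWID (see ice_lag_set_swid())
 */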
#define ICE_LAG_RES_SHARED	BIT(14)
#define ICE_LAG_RES_VALID	BIT(15)

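/* Training packet for the control-packet filter: zeroed MAC addresses
 * followed by the IEEE Slow Protocols EtherType (0x8809) used by LACP
 */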
#define LACP_TRAIN_PKT_LEN		16
static const u8 lacp_train_pkt[LACP_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
						       0, 0, 0, 0, 0, 0,
						       0x88, 0x09, 0, 0 };

#define ICE_RECIPE_LEN			64
static const u8 ice_dflt_vsi_rcp[ICE_RECIPE_LEN] = {
	0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x85, 0, 0x01, 0, 0, 0, 0xff, 0xff, 0x08, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

/**
 * ice_lag_set_primary - set PF LAG state as Primary
 * @lag: LAG info struct
 */
static void ice_lag_set_primary(struct ice_lag *lag)
{
	struct ice_pf *pf = lag->pf;

	if (!pf)
		return;

	if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_BACKUP) {
		dev_warn(ice_pf_to_dev(pf), "%s: Attempt to be Primary, but incompatible state.\n",
			 netdev_name(lag->netdev));
		return;
	}

	lag->role = ICE_LAG_PRIMARY;
}

/**
 * ice_lag_set_backup - set PF LAG state to Backup
 * @lag: LAG info struct
 */
static void ice_lag_set_backup(struct ice_lag *lag)
{
	struct ice_pf *pf = lag->pf;

	if (!pf)
		return;

	if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_PRIMARY) {
		dev_dbg(ice_pf_to_dev(pf), "%s: Attempt to be Backup, but incompatible state\n",
			netdev_name(lag->netdev));
		return;
	}

	lag->role = ICE_LAG_BACKUP;
}

/**
 * netif_is_same_ice - determine if netdev is on the same ice NIC as local PF
 * @pf: local PF struct
 * @netdev: netdev we are evaluating
 */
static bool netif_is_same_ice(struct ice_pf *pf, struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_pf *test_pf;
	struct ice_vsi *vsi;

	if (!netif_is_ice(netdev))
		return false;

	np = netdev_priv(netdev);
	if (!np)
		return false;

	vsi = np->vsi;
	if (!vsi)
		return false;

	test_pf = vsi->back;
	if (!test_pf)
		return false;

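	/* PFs on the same NIC share the same PCI bus and physical slot */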
	if (pf->pdev->bus != test_pf->pdev->bus ||
	    pf->pdev->slot != test_pf->pdev->slot)
		return false;

	return true;
}

/**
 * ice_netdev_to_lag - return pointer to associated lag struct from netdev
 * @netdev: pointer to net_device struct to query
 */
static struct ice_lag *ice_netdev_to_lag(struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_vsi *vsi;

	if (!netif_is_ice(netdev))
		return NULL;

	np = netdev_priv(netdev);
	if (!np)
		return NULL;

	vsi = np->vsi;
	if (!vsi)
		return NULL;

	return vsi->back->lag;
}

/**
 * ice_lag_find_hw_by_lport - return a hw struct from a bond member's lport
 * @lag: lag struct
 * @lport: lport value to search for
 */
static struct ice_hw *
ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport)
{
	struct ice_lag_netdev_list *entry;
	struct net_device *tmp_netdev;
	struct ice_netdev_priv *np;
	struct ice_hw *hw;

	list_for_each_entry(entry, lag->netdev_head, node) {
		tmp_netdev = entry->netdev;
		if (!tmp_netdev || !netif_is_ice(tmp_netdev))
			continue;

		np = netdev_priv(tmp_netdev);
		if (!np || !np->vsi)
			continue;

		hw = &np->vsi->back->hw;
		if (hw->port_info->lport == lport)
			return hw;
	}

	return NULL;
}

/**
 * ice_lag_find_primary - return pointer to the primary interface's lag struct
 * @lag: local interface's lag struct
 */
static struct ice_lag *ice_lag_find_primary(struct ice_lag *lag)
{
	struct ice_lag *primary_lag = NULL;
	struct list_head *tmp;

	list_for_each(tmp, lag->netdev_head) {
		struct ice_lag_netdev_list *entry;
		struct ice_lag *tmp_lag;

		entry = list_entry(tmp, struct ice_lag_netdev_list, node);
		tmp_lag = ice_netdev_to_lag(entry->netdev);
		if (tmp_lag && tmp_lag->primary) {
			primary_lag = tmp_lag;
			break;
		}
	}

	return primary_lag;
}

/**
 * ice_lag_cfg_dflt_fltr - Add/Remove default VSI rule for LAG
 * @lag: lag struct for local interface
 * @add: boolean on whether we are adding filters
 */
static int
ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add)
{
	struct ice_sw_rule_lkup_rx_tx *s_rule;
	u16 s_rule_sz, vsi_num;
	struct ice_hw *hw;
	u32 act, opc;
	u8 *eth_hdr;
	int err;

	hw = &lag->pf->hw;
	vsi_num = ice_get_hw_vsi_num(hw, 0);

	s_rule_sz = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
	s_rule = kzalloc(s_rule_sz, GFP_KERNEL);
	if (!s_rule) {
		dev_err(ice_pf_to_dev(lag->pf), "error allocating rule for LAG default VSI\n");
		return -ENOMEM;
	}

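	/* the add rule forwards traffic arriving on this lport to the PF's
	 * main VSI (HW VSI 0); the dummy Ethernet header only satisfies the
	 * lookup-rule format
	 */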
	if (add) {
		eth_hdr = s_rule->hdr_data;
		ice_fill_eth_hdr(eth_hdr);

		act = (vsi_num << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING |
			ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE;

		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->recipe_id = cpu_to_le16(lag->pf_recipe);
		s_rule->src = cpu_to_le16(hw->port_info->lport);
		s_rule->act = cpu_to_le32(act);
		s_rule->hdr_len = cpu_to_le16(DUMMY_ETH_HDR_LEN);
		opc = ice_aqc_opc_add_sw_rules;
	} else {
		s_rule->index = cpu_to_le16(lag->pf_rule_id);
		opc = ice_aqc_opc_remove_sw_rules;
	}

	err = ice_aq_sw_rules(&lag->pf->hw, s_rule, s_rule_sz, 1, opc, NULL);
	if (err)
		goto dflt_fltr_free;

	if (add)
		lag->pf_rule_id = le16_to_cpu(s_rule->index);
	else
		lag->pf_rule_id = 0;

dflt_fltr_free:
	kfree(s_rule);
	return err;
}

/**
 * ice_lag_cfg_pf_fltrs - set filters up for new active port
 * @lag: local interface's lag struct
 * @ptr: opaque data containing notifier event
 */
static void
ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
{
	struct netdev_notifier_bonding_info *info;
	struct netdev_bonding_info *bonding_info;
	struct net_device *event_netdev;
	struct device *dev;

	event_netdev = netdev_notifier_info_to_dev(ptr);
	/* not for this netdev */
	if (event_netdev != lag->netdev)
		return;

	info = (struct netdev_notifier_bonding_info *)ptr;
	bonding_info = &info->bonding_info;
	dev = ice_pf_to_dev(lag->pf);

	/* interface not active - remove old default VSI rule */
	if (bonding_info->slave.state && lag->pf_rule_id) {
		if (ice_lag_cfg_dflt_fltr(lag, false))
			dev_err(dev, "Error removing old default VSI filter\n");
		return;
	}

	/* interface becoming active - add new default VSI rule */
	if (!bonding_info->slave.state && !lag->pf_rule_id)
		if (ice_lag_cfg_dflt_fltr(lag, true))
			dev_err(dev, "Error adding new default VSI filter\n");
}

/**
 * ice_display_lag_info - print LAG info
 * @lag: LAG info struct
 */
static void ice_display_lag_info(struct ice_lag *lag)
{
	const char *name, *upper, *role, *bonded, *primary;
	struct device *dev = &lag->pf->pdev->dev;

	name = lag->netdev ? netdev_name(lag->netdev) : "unset";
	upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
	primary = lag->primary ? "TRUE" : "FALSE";
	bonded = lag->bonded ? "BONDED" : "UNBONDED";

	switch (lag->role) {
	case ICE_LAG_NONE:
		role = "NONE";
		break;
	case ICE_LAG_PRIMARY:
		role = "PRIMARY";
		break;
	case ICE_LAG_BACKUP:
		role = "BACKUP";
		break;
	case ICE_LAG_UNSET:
		role = "UNSET";
		break;
	default:
		role = "ERROR";
	}

	dev_dbg(dev, "%s %s, upper:%s, role:%s, primary:%s\n", name, bonded,
		upper, role, primary);
}

/**
 * ice_lag_qbuf_recfg - generate a buffer of queues for a reconfigure command
 * @hw: HW struct that contains the queue contexts
 * @qbuf: pointer to buffer to populate
 * @vsi_num: index of the VSI in PF space
 * @numq: number of queues to search for
 * @tc: traffic class that contains the queues
 *
 * Returns the number of valid queues placed in the buffer.
 */
static u16
ice_lag_qbuf_recfg(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *qbuf,
		   u16 vsi_num, u16 numq, u8 tc)
{
	struct ice_q_ctx *q_ctx;
	u16 qid, count = 0;
	struct ice_pf *pf;
	int i;

	pf = hw->back;
	for (i = 0; i < numq; i++) {
		q_ctx = ice_get_lan_q_ctx(hw, vsi_num, tc, i);
		if (!q_ctx) {
			dev_dbg(ice_hw_to_dev(hw), "%s queue %d NO Q CONTEXT\n",
				__func__, i);
			continue;
		}
		if (q_ctx->q_teid == ICE_INVAL_TEID) {
			dev_dbg(ice_hw_to_dev(hw), "%s queue %d INVAL TEID\n",
				__func__, i);
			continue;
		}
		if (q_ctx->q_handle == ICE_INVAL_Q_HANDLE) {
			dev_dbg(ice_hw_to_dev(hw), "%s queue %d INVAL Q HANDLE\n",
				__func__, i);
			continue;
		}

		qid = pf->vsi[vsi_num]->txq_map[q_ctx->q_handle];
		qbuf->queue_info[count].q_handle = cpu_to_le16(qid);
		qbuf->queue_info[count].tc = tc;
		qbuf->queue_info[count].q_teid = cpu_to_le32(q_ctx->q_teid);
		count++;
	}

	return count;
}

/**
 * ice_lag_get_sched_parent - locate or create a sched node parent
 * @hw: HW struct for getting parent in
 * @tc: traffic class on parent/node
 */
static struct ice_sched_node *
ice_lag_get_sched_parent(struct ice_hw *hw, u8 tc)
{
	struct ice_sched_node *tc_node, *aggnode, *parent = NULL;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_port_info *pi = hw->port_info;
	struct device *dev;
	u8 aggl, vsil;
	int n;

	dev = ice_hw_to_dev(hw);

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node) {
		dev_warn(dev, "Failure to find TC node for LAG move\n");
		return parent;
	}

	aggnode = ice_sched_get_agg_node(pi, tc_node, ICE_DFLT_AGG_ID);
	if (!aggnode) {
		dev_warn(dev, "Failure to find aggregate node for LAG move\n");
		return parent;
	}

	aggl = ice_sched_get_agg_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);

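	/* a usable parent needs one free node at each scheduler layer
	 * between the aggregator layer and the VSI layer
	 */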
	for (n = aggl + 1; n < vsil; n++)
		num_nodes[n] = 1;

	for (n = 0; n < aggnode->num_children; n++) {
		parent = ice_sched_get_free_vsi_parent(hw, aggnode->children[n],
						       num_nodes);
		if (parent)
			return parent;
	}

	/* if free parent not found - add one */
	parent = aggnode;
	for (n = aggl + 1; n < vsil; n++) {
		u16 num_nodes_added;
		u32 first_teid;
		int err;

		err = ice_sched_add_nodes_to_layer(pi, tc_node, parent, n,
						   num_nodes[n], &first_teid,
						   &num_nodes_added);
		if (err || num_nodes[n] != num_nodes_added)
			return NULL;

		if (num_nodes_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_teid);
		else
			parent = parent->children[0];
		if (!parent) {
			dev_warn(dev, "Failure to add new parent for LAG move\n");
			return parent;
		}
	}

	return parent;
}

/**
 * ice_lag_move_vf_node_tc - move scheduling nodes for one VF on one TC
 * @lag: lag info struct
 * @oldport: lport of previous nodes location
 * @newport: lport of destination nodes location
 * @vsi_num: array index of VSI in PF space
 * @tc: traffic class to move
 */
static void
ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
			u16 vsi_num, u8 tc)
{
	u16 numq, valq, buf_size, num_moved, qbuf_size;
	struct device *dev = ice_pf_to_dev(lag->pf);
	struct ice_aqc_cfg_txqs_buf *qbuf;
	struct ice_aqc_move_elem *buf;
	struct ice_sched_node *n_prt;
	struct ice_hw *new_hw = NULL;
	__le32 teid, parent_teid;
	struct ice_vsi_ctx *ctx;
	u32 tmp_teid;

	ctx = ice_get_vsi_ctx(&lag->pf->hw, vsi_num);
	if (!ctx) {
		dev_warn(dev, "Unable to locate VSI context for LAG failover\n");
		return;
	}

	/* check to see if this VF is enabled on this TC */
	if (!ctx->sched.vsi_node[tc])
		return;

	/* locate HW struct for destination port */
	new_hw = ice_lag_find_hw_by_lport(lag, newport);
	if (!new_hw) {
		dev_warn(dev, "Unable to locate HW struct for LAG node destination\n");
		return;
	}

	numq = ctx->num_lan_q_entries[tc];
	teid = ctx->sched.vsi_node[tc]->info.node_teid;
	tmp_teid = le32_to_cpu(teid);
	parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;
	/* if no teid assigned or numq == 0, then this TC is not active */
	if (!tmp_teid || !numq)
		return;

	/* suspend VSI subtree for Traffic Class "tc" on
	 * this VF's VSI
	 */
	if (ice_sched_suspend_resume_elems(&lag->pf->hw, 1, &tmp_teid, true))
		dev_dbg(dev, "Problem suspending traffic for LAG node move\n");

	/* reconfigure all VF's queues on this Traffic Class
	 * to new port
	 */
	qbuf_size = struct_size(qbuf, queue_info, numq);
	qbuf = kzalloc(qbuf_size, GFP_KERNEL);
	if (!qbuf) {
		dev_warn(dev, "Failure allocating memory for VF queue recfg buffer\n");
		goto resume_traffic;
	}

	/* add the per queue info for the reconfigure command buffer */
	valq = ice_lag_qbuf_recfg(&lag->pf->hw, qbuf, vsi_num, numq, tc);
	if (!valq) {
		dev_dbg(dev, "No valid queues found for LAG failover\n");
		goto qbuf_none;
	}

	if (ice_aq_cfg_lan_txq(&lag->pf->hw, qbuf, qbuf_size, valq, oldport,
			       newport, NULL)) {
		dev_warn(dev, "Failure to configure queues for LAG failover\n");
		goto qbuf_err;
	}

qbuf_none:
	kfree(qbuf);

	/* find new parent in destination port's tree for VF VSI node on this
	 * Traffic Class
	 */
	n_prt = ice_lag_get_sched_parent(new_hw, tc);
	if (!n_prt)
		goto resume_traffic;

	/* Move VF's VSI node for this TC to newport's scheduler tree */
	buf_size = struct_size(buf, teid, 1);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf) {
		dev_warn(dev, "Failure to alloc memory for VF node failover\n");
		goto resume_traffic;
	}

	buf->hdr.src_parent_teid = parent_teid;
	buf->hdr.dest_parent_teid = n_prt->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(1);
	buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
	buf->teid[0] = teid;

	if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
				    NULL))
		dev_warn(dev, "Failure to move VF nodes for failover\n");
	else
		ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);

	kfree(buf);
	goto resume_traffic;

qbuf_err:
	kfree(qbuf);

resume_traffic:
	/* restart traffic for VSI node */
	if (ice_sched_suspend_resume_elems(&lag->pf->hw, 1, &tmp_teid, false))
		dev_dbg(dev, "Problem restarting traffic for LAG node move\n");
}

/**
 * ice_lag_build_netdev_list - populate the lag struct's netdev list
 * @lag: local lag struct
 * @ndlist: pointer to netdev list to populate
 */
static void ice_lag_build_netdev_list(struct ice_lag *lag,
				      struct ice_lag_netdev_list *ndlist)
{
	struct ice_lag_netdev_list *nl;
	struct net_device *tmp_nd;

	INIT_LIST_HEAD(&ndlist->node);
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
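		/* GFP_ATOMIC - no sleeping inside the RCU read-side section */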
		nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
		if (!nl)
			break;

		nl->netdev = tmp_nd;
		list_add(&nl->node, &ndlist->node);
	}
	rcu_read_unlock();
	lag->netdev_head = &ndlist->node;
}

/**
 * ice_lag_destroy_netdev_list - free lag struct's netdev list
 * @lag: pointer to local lag struct
 * @ndlist: pointer to lag struct netdev list
 */
static void ice_lag_destroy_netdev_list(struct ice_lag *lag,
					struct ice_lag_netdev_list *ndlist)
{
	struct ice_lag_netdev_list *entry, *n;

	rcu_read_lock();
	list_for_each_entry_safe(entry, n, &ndlist->node, node) {
		list_del(&entry->node);
		kfree(entry);
	}
	rcu_read_unlock();
	lag->netdev_head = NULL;
}

/**
 * ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF
 * @lag: primary interface LAG struct
 * @oldport: lport of previous interface
 * @newport: lport of destination interface
 * @vsi_num: SW index of VF's VSI
 */
static void
ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
			     u16 vsi_num)
{
	u8 tc;

	ice_for_each_traffic_class(tc)
		ice_lag_move_vf_node_tc(lag, oldport, newport, vsi_num, tc);
}

/**
 * ice_lag_move_new_vf_nodes - Move Tx scheduling nodes for a VF if required
 * @vf: the VF to move Tx nodes for
 *
 * Called just after configuring new VF queues. Check whether the VF Tx
 * scheduling nodes need to be updated to fail over to the active port. If so,
 * move them now.
 */
void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
{
	struct ice_lag_netdev_list ndlist;
	u8 pri_port, act_port;
	struct ice_lag *lag;
	struct ice_vsi *vsi;
	struct ice_pf *pf;

	vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	if (WARN_ON(vsi->type != ICE_VSI_VF))
		return;

	pf = vf->pf;
	lag = pf->lag;

	mutex_lock(&pf->lag_mutex);
	if (!lag->bonded)
		goto new_vf_unlock;

	pri_port = pf->hw.port_info->lport;
	act_port = lag->active_port;

	if (lag->upper_netdev)
		ice_lag_build_netdev_list(lag, &ndlist);

	if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
	    lag->bonded && lag->primary && pri_port != act_port &&
	    !list_empty(lag->netdev_head))
		ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);

	ice_lag_destroy_netdev_list(lag, &ndlist);

new_vf_unlock:
	mutex_unlock(&pf->lag_mutex);
}

/**
 * ice_lag_move_vf_nodes - move Tx scheduling nodes for all VFs to new port
 * @lag: lag info struct
 * @oldport: lport of previous interface
 * @newport: lport of destination interface
 */
static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
{
	struct ice_pf *pf;
	int i;

	if (!lag->primary)
		return;

	pf = lag->pf;
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
				   pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
			ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
}

/**
 * ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context
 * @lag: local lag struct
 * @src_prt: lport value for source port
 * @dst_prt: lport value for destination port
 *
 * This function is used to move nodes during an out-of-netdev-event situation,
 * primarily when the driver needs to reconfigure or recreate resources.
 *
 * Must be called while holding the lag_mutex to avoid lag events from
 * processing while out-of-sync moves are happening.  Also, paired moves,
 * such as those used in a reset flow, should both be called under the same
 * mutex lock to avoid changes between start of reset and end of reset.
 */
void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
{
	struct ice_lag_netdev_list ndlist;

	ice_lag_build_netdev_list(lag, &ndlist);
	ice_lag_move_vf_nodes(lag, src_prt, dst_prt);
	ice_lag_destroy_netdev_list(lag, &ndlist);
}

#define ICE_LAG_SRIOV_CP_RECIPE		10
#define ICE_LAG_SRIOV_TRAIN_PKT_LEN	16

/**
 * ice_lag_cfg_cp_fltr - configure filter for control packets
 * @lag: local interface's lag struct
 * @add: add or remove rule
 */
static void
ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add)
{
	struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
	struct ice_vsi *vsi;
	u16 buf_len, opc;

	vsi = lag->pf->vsi[0];

	buf_len = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule,
					     ICE_LAG_SRIOV_TRAIN_PKT_LEN);
	s_rule = kzalloc(buf_len, GFP_KERNEL);
	if (!s_rule) {
		netdev_warn(lag->netdev, "-ENOMEM error configuring CP filter\n");
		return;
	}

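	/* the add rule uses the LACP training packet as its header data so
	 * that LACP control packets are steered to the PF's main VSI
	 */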
	if (add) {
		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->recipe_id = cpu_to_le16(ICE_LAG_SRIOV_CP_RECIPE);
		s_rule->src = cpu_to_le16(vsi->port_info->lport);
		s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
					  ICE_SINGLE_ACT_LAN_ENABLE |
					  ICE_SINGLE_ACT_VALID_BIT |
					  ((vsi->vsi_num <<
					    ICE_SINGLE_ACT_VSI_ID_S) &
					   ICE_SINGLE_ACT_VSI_ID_M));
		s_rule->hdr_len = cpu_to_le16(ICE_LAG_SRIOV_TRAIN_PKT_LEN);
		memcpy(s_rule->hdr_data, lacp_train_pkt, LACP_TRAIN_PKT_LEN);
		opc = ice_aqc_opc_add_sw_rules;
	} else {
		opc = ice_aqc_opc_remove_sw_rules;
		s_rule->index = cpu_to_le16(lag->cp_rule_idx);
	}
	if (ice_aq_sw_rules(&lag->pf->hw, s_rule, buf_len, 1, opc, NULL)) {
		netdev_warn(lag->netdev, "Error %s CP rule for fail-over\n",
			    add ? "ADDING" : "REMOVING");
		goto cp_free;
	}

	if (add)
		lag->cp_rule_idx = le16_to_cpu(s_rule->index);
	else
		lag->cp_rule_idx = 0;

cp_free:
	kfree(s_rule);
}

/**
 * ice_lag_info_event - handle NETDEV_BONDING_INFO event
 * @lag: LAG info struct
 * @ptr: opaque data pointer
 *
 * ptr is to be cast to (netdev_notifier_bonding_info *)
 */
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
{
	struct netdev_notifier_bonding_info *info;
	struct netdev_bonding_info *bonding_info;
	struct net_device *event_netdev;
	const char *lag_netdev_name;

	event_netdev = netdev_notifier_info_to_dev(ptr);
	info = ptr;
	lag_netdev_name = netdev_name(lag->netdev);
	bonding_info = &info->bonding_info;

	if (event_netdev != lag->netdev || !lag->bonded || !lag->upper_netdev)
		return;

	if (bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) {
		netdev_dbg(lag->netdev, "Bonding event recv, but mode not active/backup\n");
		goto lag_out;
	}

	if (strcmp(bonding_info->slave.slave_name, lag_netdev_name)) {
		netdev_dbg(lag->netdev, "Bonding event recv, but secondary info not for us\n");
		goto lag_out;
	}

	if (bonding_info->slave.state)
		ice_lag_set_backup(lag);
	else
		ice_lag_set_primary(lag);

lag_out:
	ice_display_lag_info(lag);
}

/**
 * ice_lag_reclaim_vf_tc - move scheduling nodes back to primary interface
 * @lag: primary interface lag struct
 * @src_hw: HW struct for current node location
 * @vsi_num: VSI index in PF space
 * @tc: traffic class to move
 */
static void
ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
		      u8 tc)
{
	u16 numq, valq, buf_size, num_moved, qbuf_size;
	struct device *dev = ice_pf_to_dev(lag->pf);
	struct ice_aqc_cfg_txqs_buf *qbuf;
	struct ice_aqc_move_elem *buf;
	struct ice_sched_node *n_prt;
	__le32 teid, parent_teid;
	struct ice_vsi_ctx *ctx;
	struct ice_hw *hw;
	u32 tmp_teid;

	hw = &lag->pf->hw;
	ctx = ice_get_vsi_ctx(hw, vsi_num);
	if (!ctx) {
		dev_warn(dev, "Unable to locate VSI context for LAG reclaim\n");
		return;
	}

	/* check to see if this VF is enabled on this TC */
	if (!ctx->sched.vsi_node[tc])
		return;

	numq = ctx->num_lan_q_entries[tc];
	teid = ctx->sched.vsi_node[tc]->info.node_teid;
	tmp_teid = le32_to_cpu(teid);
	parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;

	/* if !teid or !numq, then this TC is not active */
	if (!tmp_teid || !numq)
		return;

	/* suspend traffic */
	if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, true))
		dev_dbg(dev, "Problem suspending traffic for LAG node move\n");

	/* reconfig queues for new port */
	qbuf_size = struct_size(qbuf, queue_info, numq);
	qbuf = kzalloc(qbuf_size, GFP_KERNEL);
	if (!qbuf) {
		dev_warn(dev, "Failure allocating memory for VF queue recfg buffer\n");
		goto resume_reclaim;
	}

	/* add the per queue info for the reconfigure command buffer */
	valq = ice_lag_qbuf_recfg(hw, qbuf, vsi_num, numq, tc);
	if (!valq) {
		dev_dbg(dev, "No valid queues found for LAG reclaim\n");
		goto reclaim_none;
	}

	if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq,
			       src_hw->port_info->lport, hw->port_info->lport,
			       NULL)) {
		dev_warn(dev, "Failure to configure queues for LAG failover\n");
		goto reclaim_qerr;
	}

reclaim_none:
	kfree(qbuf);

	/* find parent in primary tree */
	n_prt = ice_lag_get_sched_parent(hw, tc);
	if (!n_prt)
		goto resume_reclaim;

	/* Move node to new parent */
	buf_size = struct_size(buf, teid, 1);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf) {
		dev_warn(dev, "Failure to alloc memory for VF node failover\n");
		goto resume_reclaim;
	}

	buf->hdr.src_parent_teid = parent_teid;
	buf->hdr.dest_parent_teid = n_prt->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(1);
	buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
	buf->teid[0] = teid;

	if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
				    NULL))
		dev_warn(dev, "Failure to move VF nodes for LAG reclaim\n");
	else
		ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);

	kfree(buf);
	goto resume_reclaim;

reclaim_qerr:
	kfree(qbuf);

resume_reclaim:
	/* restart traffic */
	if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, false))
		dev_warn(dev, "Problem restarting traffic for LAG node reclaim\n");
}

/**
 * ice_lag_reclaim_vf_nodes - reclaim VF nodes when an interface leaves the bond
 * @lag: primary interface lag struct
 * @src_hw: HW struct for current node location
 */
static void
ice_lag_reclaim_vf_nodes(struct ice_lag *lag, struct ice_hw *src_hw)
{
	struct ice_pf *pf;
	int i, tc;

	if (!lag->primary || !src_hw)
		return;

	pf = lag->pf;
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
				   pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
			ice_for_each_traffic_class(tc)
				ice_lag_reclaim_vf_tc(lag, src_hw, i, tc);
}

/**
 * ice_lag_link - handle LAG link event
 * @lag: LAG info struct
 */
static void ice_lag_link(struct ice_lag *lag)
{
	struct ice_pf *pf = lag->pf;

	if (lag->bonded)
		dev_warn(ice_pf_to_dev(pf), "%s Already part of a bond\n",
			 netdev_name(lag->netdev));

	lag->bonded = true;
	lag->role = ICE_LAG_UNSET;
	netdev_info(lag->netdev, "Shared SR-IOV resources in bond are active\n");
}

/**
 * ice_lag_unlink - handle unlink event
 * @lag: LAG info struct
 */
static void ice_lag_unlink(struct ice_lag *lag)
{
	u8 pri_port, act_port, loc_port;
	struct ice_pf *pf = lag->pf;

	if (!lag->bonded) {
		netdev_dbg(lag->netdev, "bonding unlink event on non-LAG netdev\n");
		return;
	}

	if (lag->primary) {
		act_port = lag->active_port;
		pri_port = lag->pf->hw.port_info->lport;
		if (act_port != pri_port && act_port != ICE_LAG_INVALID_PORT)
			ice_lag_move_vf_nodes(lag, act_port, pri_port);
		lag->primary = false;
		lag->active_port = ICE_LAG_INVALID_PORT;
	} else {
		struct ice_lag *primary_lag;

		primary_lag = ice_lag_find_primary(lag);
		if (primary_lag) {
			act_port = primary_lag->active_port;
			pri_port = primary_lag->pf->hw.port_info->lport;
			loc_port = pf->hw.port_info->lport;
			if (act_port == loc_port &&
			    act_port != ICE_LAG_INVALID_PORT) {
				ice_lag_reclaim_vf_nodes(primary_lag,
							 &lag->pf->hw);
				primary_lag->active_port = ICE_LAG_INVALID_PORT;
			}
		}
	}

	lag->bonded = false;
	lag->role = ICE_LAG_NONE;
	lag->upper_netdev = NULL;
}

/**
 * ice_lag_link_unlink - helper function to call lag_link/unlink
 * @lag: lag info struct
 * @ptr: opaque pointer data
 */
static void ice_lag_link_unlink(struct ice_lag *lag, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;

	if (netdev != lag->netdev)
		return;

	if (info->linking)
		ice_lag_link(lag);
	else
		ice_lag_unlink(lag);
}

/**
 * ice_lag_set_swid - set the SWID on secondary interface
 * @primary_swid: primary interface's SWID
 * @local_lag: local interface's LAG struct
 * @link: Is this a linking activity
 *
 * If link is false, primary_swid is not expected to be valid.
 * This function should never be called in interrupt context.
 */
static void
ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
		 bool link)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	struct ice_aqc_set_port_params *cmd;
	struct ice_aq_desc desc;
	u16 buf_len, swid;
	int status, i;

	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		dev_err(ice_pf_to_dev(local_lag->pf), "-ENOMEM error setting SWID\n");
		return;
	}

	buf->num_elems = cpu_to_le16(1);
	buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_SWID);
	/* if unlinking, need to free the shared resource */
	if (!link && local_lag->bond_swid) {
		buf->elem[0].e.sw_resp = cpu_to_le16(local_lag->bond_swid);
		status = ice_aq_alloc_free_res(&local_lag->pf->hw, buf,
					       buf_len, ice_aqc_opc_free_res);
		if (status)
			dev_err(ice_pf_to_dev(local_lag->pf), "Error freeing SWID during LAG unlink\n");
		local_lag->bond_swid = 0;
	}

	if (link) {
		buf->res_type |= cpu_to_le16(ICE_LAG_RES_SHARED |
					     ICE_LAG_RES_VALID);
		/* store the primary's SWID in case it leaves bond first */
		local_lag->bond_swid = primary_swid;
		buf->elem[0].e.sw_resp = cpu_to_le16(local_lag->bond_swid);
	} else {
		buf->elem[0].e.sw_resp =
			cpu_to_le16(local_lag->pf->hw.port_info->sw_id);
	}

	status = ice_aq_alloc_free_res(&local_lag->pf->hw, buf, buf_len,
				       ice_aqc_opc_alloc_res);
	if (status)
		dev_err(ice_pf_to_dev(local_lag->pf), "Error subscribing to SWID 0x%04X\n",
			local_lag->bond_swid);

	kfree(buf);

	/* Configure port param SWID to correct value */
	if (link)
		swid = primary_swid;
	else
		swid = local_lag->pf->hw.port_info->sw_id;

	cmd = &desc.params.set_port_params;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);

	cmd->swid = cpu_to_le16(ICE_AQC_PORT_SWID_VALID | swid);
	/* If this is happening in reset context, it is possible that the
	 * primary interface has not finished setting its SWID to SHARED
	 * yet.  Allow retries to account for this timing issue between
	 * interfaces.
	 */
	for (i = 0; i < ICE_LAG_RESET_RETRIES; i++) {
		status = ice_aq_send_cmd(&local_lag->pf->hw, &desc, NULL, 0,
					 NULL);
		if (!status)
			break;

		usleep_range(1000, 2000);
	}

	if (status)
		dev_err(ice_pf_to_dev(local_lag->pf), "Error setting SWID in port params %d\n",
			status);
}

/**
 * ice_lag_primary_swid - set/clear the SHARED attrib of primary's SWID
 * @lag: primary interface's lag struct
 * @link: is this a linking activity
 *
 * Implement setting primary SWID as shared using 0x020B
 */
static void ice_lag_primary_swid(struct ice_lag *lag, bool link)
{
	struct ice_hw *hw;
	u16 swid;

	hw = &lag->pf->hw;
	swid = hw->port_info->sw_id;

	if (ice_share_res(hw, ICE_AQC_RES_TYPE_SWID, link, swid))
		dev_warn(ice_pf_to_dev(lag->pf), "Failure to set primary interface shared status\n");
}

1103ec5a6c5fSDave Ertman /**
1104ec5a6c5fSDave Ertman  * ice_lag_add_prune_list - Adds event_pf's VSI to primary's prune list
1105ec5a6c5fSDave Ertman  * @lag: lag info struct
1106ec5a6c5fSDave Ertman  * @event_pf: PF struct for VSI we are adding to primary's prune list
1107ec5a6c5fSDave Ertman  */
ice_lag_add_prune_list(struct ice_lag * lag,struct ice_pf * event_pf)1108ec5a6c5fSDave Ertman static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
1109ec5a6c5fSDave Ertman {
11101e0f9881SDave Ertman 	u16 num_vsi, rule_buf_sz, vsi_list_id, event_vsi_num, prim_vsi_idx;
11111e0f9881SDave Ertman 	struct ice_sw_rule_vsi_list *s_rule = NULL;
11121e0f9881SDave Ertman 	struct device *dev;
11131e0f9881SDave Ertman 
11141e0f9881SDave Ertman 	num_vsi = 1;
11151e0f9881SDave Ertman 
11161e0f9881SDave Ertman 	dev = ice_pf_to_dev(lag->pf);
11171e0f9881SDave Ertman 	event_vsi_num = event_pf->vsi[0]->vsi_num;
11181e0f9881SDave Ertman 	prim_vsi_idx = lag->pf->vsi[0]->idx;
11191e0f9881SDave Ertman 
11201e0f9881SDave Ertman 	if (!ice_find_vsi_list_entry(&lag->pf->hw, ICE_SW_LKUP_VLAN,
11211e0f9881SDave Ertman 				     prim_vsi_idx, &vsi_list_id)) {
11221e0f9881SDave Ertman 		dev_warn(dev, "Could not locate prune list when setting up SRIOV LAG\n");
11231e0f9881SDave Ertman 		return;
11241e0f9881SDave Ertman 	}
11251e0f9881SDave Ertman 
11261e0f9881SDave Ertman 	rule_buf_sz = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
11271e0f9881SDave Ertman 	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
11281e0f9881SDave Ertman 	if (!s_rule) {
11291e0f9881SDave Ertman 		dev_warn(dev, "Error allocating space for prune list when configuring SRIOV LAG\n");
11301e0f9881SDave Ertman 		return;
11311e0f9881SDave Ertman 	}
11321e0f9881SDave Ertman 
11331e0f9881SDave Ertman 	s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_PRUNE_LIST_SET);
11341e0f9881SDave Ertman 	s_rule->index = cpu_to_le16(vsi_list_id);
11351e0f9881SDave Ertman 	s_rule->number_vsi = cpu_to_le16(num_vsi);
11361e0f9881SDave Ertman 	s_rule->vsi[0] = cpu_to_le16(event_vsi_num);
11371e0f9881SDave Ertman 
11381e0f9881SDave Ertman 	if (ice_aq_sw_rules(&event_pf->hw, s_rule, rule_buf_sz, 1,
11391e0f9881SDave Ertman 			    ice_aqc_opc_update_sw_rules, NULL))
11401e0f9881SDave Ertman 		dev_warn(dev, "Error adding VSI prune list\n");
11411e0f9881SDave Ertman 	kfree(s_rule);
1142ec5a6c5fSDave Ertman }
1143ec5a6c5fSDave Ertman 
1144ec5a6c5fSDave Ertman /**
1145ec5a6c5fSDave Ertman  * ice_lag_del_prune_list - Remove secondary's VSI from primary's prune list
1146ec5a6c5fSDave Ertman  * @lag: primary interface's ice_lag struct
1147ec5a6c5fSDave Ertman  * @event_pf: PF struct for unlinking interface
1148ec5a6c5fSDave Ertman  */
ice_lag_del_prune_list(struct ice_lag * lag,struct ice_pf * event_pf)1149ec5a6c5fSDave Ertman static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
1150ec5a6c5fSDave Ertman {
11511e0f9881SDave Ertman 	u16 num_vsi, vsi_num, vsi_idx, rule_buf_sz, vsi_list_id;
11521e0f9881SDave Ertman 	struct ice_sw_rule_vsi_list *s_rule = NULL;
11531e0f9881SDave Ertman 	struct device *dev;
11541e0f9881SDave Ertman 
11551e0f9881SDave Ertman 	num_vsi = 1;
11561e0f9881SDave Ertman 
11571e0f9881SDave Ertman 	dev = ice_pf_to_dev(lag->pf);
11581e0f9881SDave Ertman 	vsi_num = event_pf->vsi[0]->vsi_num;
11591e0f9881SDave Ertman 	vsi_idx = lag->pf->vsi[0]->idx;
11601e0f9881SDave Ertman 
11611e0f9881SDave Ertman 	if (!ice_find_vsi_list_entry(&lag->pf->hw, ICE_SW_LKUP_VLAN,
11621e0f9881SDave Ertman 				     vsi_idx, &vsi_list_id)) {
11631e0f9881SDave Ertman 		dev_warn(dev, "Could not locate prune list when unwinding SRIOV LAG\n");
11641e0f9881SDave Ertman 		return;
11651e0f9881SDave Ertman 	}
11661e0f9881SDave Ertman 
11671e0f9881SDave Ertman 	rule_buf_sz = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
11681e0f9881SDave Ertman 	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
11691e0f9881SDave Ertman 	if (!s_rule) {
11701e0f9881SDave Ertman 		dev_warn(dev, "Error allocating prune list when unwinding SRIOV LAG\n");
11711e0f9881SDave Ertman 		return;
11721e0f9881SDave Ertman 	}
11731e0f9881SDave Ertman 
11741e0f9881SDave Ertman 	s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR);
11751e0f9881SDave Ertman 	s_rule->index = cpu_to_le16(vsi_list_id);
11761e0f9881SDave Ertman 	s_rule->number_vsi = cpu_to_le16(num_vsi);
11771e0f9881SDave Ertman 	s_rule->vsi[0] = cpu_to_le16(vsi_num);
11781e0f9881SDave Ertman 
11791e0f9881SDave Ertman 	if (ice_aq_sw_rules(&event_pf->hw, (struct ice_aqc_sw_rules *)s_rule,
11801e0f9881SDave Ertman 			    rule_buf_sz, 1, ice_aqc_opc_update_sw_rules, NULL))
11811e0f9881SDave Ertman 		dev_warn(dev, "Error clearing VSI prune list\n");
11821e0f9881SDave Ertman 
11831e0f9881SDave Ertman 	kfree(s_rule);
1184bea1898fSDave Ertman }
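
/*
 * ice_lag_add_prune_list() and ice_lag_del_prune_list() above are mirror
 * images: both locate the primary interface's VLAN prune VSI list, then
 * issue a PRUNE_LIST_SET or PRUNE_LIST_CLEAR switch rule for the event
 * PF's VSI 0, sent through the event PF's admin queue.
 */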
1185bea1898fSDave Ertman 
1186bea1898fSDave Ertman /**
1187bb52f42aSDave Ertman  * ice_lag_init_feature_support_flag - Check for NVM support for LAG
1188bb52f42aSDave Ertman  * @pf: PF struct
1189bb52f42aSDave Ertman  */
ice_lag_init_feature_support_flag(struct ice_pf * pf)1190bb52f42aSDave Ertman static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
1191bb52f42aSDave Ertman {
1192bb52f42aSDave Ertman 	struct ice_hw_common_caps *caps;
1193bb52f42aSDave Ertman 
1194bb52f42aSDave Ertman 	caps = &pf->hw.dev_caps.common_cap;
1195bb52f42aSDave Ertman 	if (caps->roce_lag)
1196bb52f42aSDave Ertman 		ice_set_feature_support(pf, ICE_F_ROCE_LAG);
1197bb52f42aSDave Ertman 	else
1198bb52f42aSDave Ertman 		ice_clear_feature_support(pf, ICE_F_ROCE_LAG);
1199bb52f42aSDave Ertman 
1200bb52f42aSDave Ertman 	if (caps->sriov_lag)
1201bb52f42aSDave Ertman 		ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
1202bb52f42aSDave Ertman 	else
1203bb52f42aSDave Ertman 		ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
1204bb52f42aSDave Ertman }
1205bb52f42aSDave Ertman 
1206bb52f42aSDave Ertman /**
1207df006dd4SDave Ertman  * ice_lag_changeupper_event - handle LAG changeupper event
1208df006dd4SDave Ertman  * @lag: LAG info struct
1209df006dd4SDave Ertman  * @ptr: opaque pointer data
1210df006dd4SDave Ertman  */
ice_lag_changeupper_event(struct ice_lag * lag,void * ptr)1211df006dd4SDave Ertman static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
1212df006dd4SDave Ertman {
1213df006dd4SDave Ertman 	struct netdev_notifier_changeupper_info *info;
1214ec5a6c5fSDave Ertman 	struct ice_lag *primary_lag;
1215df006dd4SDave Ertman 	struct net_device *netdev;
1216df006dd4SDave Ertman 
1217df006dd4SDave Ertman 	info = ptr;
1218df006dd4SDave Ertman 	netdev = netdev_notifier_info_to_dev(ptr);
1219df006dd4SDave Ertman 
1220df006dd4SDave Ertman 	/* not for this netdev */
1221df006dd4SDave Ertman 	if (netdev != lag->netdev)
1222df006dd4SDave Ertman 		return;
1223df006dd4SDave Ertman 
1224ec5a6c5fSDave Ertman 	primary_lag = ice_lag_find_primary(lag);
1225ec5a6c5fSDave Ertman 	if (info->linking) {
1226ec5a6c5fSDave Ertman 		lag->upper_netdev = info->upper_dev;
1227ec5a6c5fSDave Ertman 		/* If there is not already a primary interface in the LAG,
1228ec5a6c5fSDave Ertman 		 * then mark this one as primary.
1229ec5a6c5fSDave Ertman 		 */
1230ec5a6c5fSDave Ertman 		if (!primary_lag) {
1231ec5a6c5fSDave Ertman 			lag->primary = true;
1232ec5a6c5fSDave Ertman 			/* Configure primary's SWID to be shared */
1233ec5a6c5fSDave Ertman 			ice_lag_primary_swid(lag, true);
1234ec5a6c5fSDave Ertman 			primary_lag = lag;
1235ec5a6c5fSDave Ertman 		} else {
1236ec5a6c5fSDave Ertman 			u16 swid;
1237ec5a6c5fSDave Ertman 
1238ec5a6c5fSDave Ertman 			swid = primary_lag->pf->hw.port_info->sw_id;
1239ec5a6c5fSDave Ertman 			ice_lag_set_swid(swid, lag, true);
1240ec5a6c5fSDave Ertman 			ice_lag_add_prune_list(primary_lag, lag->pf);
1241ec5a6c5fSDave Ertman 		}
1242ec5a6c5fSDave Ertman 		/* add filter for primary control packets */
1243ec5a6c5fSDave Ertman 		ice_lag_cfg_cp_fltr(lag, true);
1244ec5a6c5fSDave Ertman 	} else {
1245ec5a6c5fSDave Ertman 		if (!primary_lag && lag->primary)
1246ec5a6c5fSDave Ertman 			primary_lag = lag;
1247ec5a6c5fSDave Ertman 
1248ec5a6c5fSDave Ertman 		if (!lag->primary) {
1249ec5a6c5fSDave Ertman 			ice_lag_set_swid(0, lag, false);
1250ec5a6c5fSDave Ertman 		} else {
1251ba789fb4SDave Ertman 			if (primary_lag && lag->primary) {
1252ec5a6c5fSDave Ertman 				ice_lag_primary_swid(lag, false);
1253ec5a6c5fSDave Ertman 				ice_lag_del_prune_list(primary_lag, lag->pf);
1254ec5a6c5fSDave Ertman 			}
1255ec5a6c5fSDave Ertman 		}
1256ba789fb4SDave Ertman 		/* remove filter for control packets */
1257ba789fb4SDave Ertman 		ice_lag_cfg_cp_fltr(lag, false);
1258ec5a6c5fSDave Ertman 	}
1259df006dd4SDave Ertman }
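
/*
 * Responsibility split in the handler above:
 *   linking, no primary yet  -> become primary and share this port's SWID
 *   linking, primary exists  -> adopt the primary's SWID and join its
 *                               prune list
 *   unlinking as secondary   -> restore this port's own SWID
 *   unlinking as primary     -> stop sharing the SWID and drop the prune
 *                               list entry
 * Control-packet filters are added on link and removed on unlink.
 */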
1260df006dd4SDave Ertman 
1261ec5a6c5fSDave Ertman /**
1262ec5a6c5fSDave Ertman  * ice_lag_monitor_link - monitor interfaces entering/leaving the aggregate
1263ec5a6c5fSDave Ertman  * @lag: lag info struct
1264ec5a6c5fSDave Ertman  * @ptr: opaque data containing notifier event
1265ec5a6c5fSDave Ertman  *
1266ec5a6c5fSDave Ertman  * This function only operates after a primary has been set.
1267ec5a6c5fSDave Ertman  */
ice_lag_monitor_link(struct ice_lag * lag,void * ptr)1268ec5a6c5fSDave Ertman static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
1269ec5a6c5fSDave Ertman {
12701e0f9881SDave Ertman 	struct netdev_notifier_changeupper_info *info;
12711e0f9881SDave Ertman 	struct ice_hw *prim_hw, *active_hw;
12721e0f9881SDave Ertman 	struct net_device *event_netdev;
12731e0f9881SDave Ertman 	struct ice_pf *pf;
12741e0f9881SDave Ertman 	u8 prim_port;
12751e0f9881SDave Ertman 
12761e0f9881SDave Ertman 	if (!lag->primary)
12771e0f9881SDave Ertman 		return;
12781e0f9881SDave Ertman 
12791e0f9881SDave Ertman 	event_netdev = netdev_notifier_info_to_dev(ptr);
12801e0f9881SDave Ertman 	if (!netif_is_same_ice(lag->pf, event_netdev))
12811e0f9881SDave Ertman 		return;
12821e0f9881SDave Ertman 
12831e0f9881SDave Ertman 	pf = lag->pf;
12841e0f9881SDave Ertman 	prim_hw = &pf->hw;
12851e0f9881SDave Ertman 	prim_port = prim_hw->port_info->lport;
12861e0f9881SDave Ertman 
12871e0f9881SDave Ertman 	info = (struct netdev_notifier_changeupper_info *)ptr;
12881e0f9881SDave Ertman 	if (info->upper_dev != lag->upper_netdev)
12891e0f9881SDave Ertman 		return;
12901e0f9881SDave Ertman 
12911e0f9881SDave Ertman 	if (!info->linking) {
12921e0f9881SDave Ertman 		/* Since only two interfaces are allowed in SRIOV+LAG, if
12931e0f9881SDave Ertman 		 * one port is leaving, then the nodes need to be on the
12941e0f9881SDave Ertman 		 * primary interface.
12951e0f9881SDave Ertman 		 */
12961e0f9881SDave Ertman 		if (prim_port != lag->active_port &&
12971e0f9881SDave Ertman 		    lag->active_port != ICE_LAG_INVALID_PORT) {
12981e0f9881SDave Ertman 			active_hw = ice_lag_find_hw_by_lport(lag,
12991e0f9881SDave Ertman 							     lag->active_port);
13001e0f9881SDave Ertman 			ice_lag_reclaim_vf_nodes(lag, active_hw);
13011e0f9881SDave Ertman 			lag->active_port = ICE_LAG_INVALID_PORT;
13021e0f9881SDave Ertman 		}
13031e0f9881SDave Ertman 	}
1304df006dd4SDave Ertman }
1305df006dd4SDave Ertman 
1306ec5a6c5fSDave Ertman /**
1307ec5a6c5fSDave Ertman  * ice_lag_monitor_active - primary PF keeps track of which port is active
1308ec5a6c5fSDave Ertman  * @lag: lag info struct
1309ec5a6c5fSDave Ertman  * @ptr: opaque data containing notifier event
1310ec5a6c5fSDave Ertman  *
1311ec5a6c5fSDave Ertman  * This function is for the primary PF to monitor changes in which port is
1312ec5a6c5fSDave Ertman  * active and handle changes for SRIOV VF functionality
1313ec5a6c5fSDave Ertman  */
ice_lag_monitor_active(struct ice_lag * lag,void * ptr)1314ec5a6c5fSDave Ertman static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
1315ec5a6c5fSDave Ertman {
13161e0f9881SDave Ertman 	struct net_device *event_netdev, *event_upper;
13171e0f9881SDave Ertman 	struct netdev_notifier_bonding_info *info;
13181e0f9881SDave Ertman 	struct netdev_bonding_info *bonding_info;
13191e0f9881SDave Ertman 	struct ice_netdev_priv *event_np;
13201e0f9881SDave Ertman 	struct ice_pf *pf, *event_pf;
13211e0f9881SDave Ertman 	u8 prim_port, event_port;
13221e0f9881SDave Ertman 
13231e0f9881SDave Ertman 	if (!lag->primary)
13241e0f9881SDave Ertman 		return;
13251e0f9881SDave Ertman 
13261e0f9881SDave Ertman 	pf = lag->pf;
13271e0f9881SDave Ertman 	if (!pf)
13281e0f9881SDave Ertman 		return;
13291e0f9881SDave Ertman 
13301e0f9881SDave Ertman 	event_netdev = netdev_notifier_info_to_dev(ptr);
13311e0f9881SDave Ertman 	rcu_read_lock();
13321e0f9881SDave Ertman 	event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
13331e0f9881SDave Ertman 	rcu_read_unlock();
13341e0f9881SDave Ertman 	if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev)
13351e0f9881SDave Ertman 		return;
13361e0f9881SDave Ertman 
13371e0f9881SDave Ertman 	event_np = netdev_priv(event_netdev);
13381e0f9881SDave Ertman 	event_pf = event_np->vsi->back;
13391e0f9881SDave Ertman 	event_port = event_pf->hw.port_info->lport;
13401e0f9881SDave Ertman 	prim_port = pf->hw.port_info->lport;
13411e0f9881SDave Ertman 
13421e0f9881SDave Ertman 	info = (struct netdev_notifier_bonding_info *)ptr;
13431e0f9881SDave Ertman 	bonding_info = &info->bonding_info;
13441e0f9881SDave Ertman 
13451e0f9881SDave Ertman 	if (!bonding_info->slave.state) {
13461e0f9881SDave Ertman 		/* if no port is currently active, then nodes and filters exist
13471e0f9881SDave Ertman 		 * on the primary port; check if we need to move them
13481e0f9881SDave Ertman 		 */
13491e0f9881SDave Ertman 		if (lag->active_port == ICE_LAG_INVALID_PORT) {
13501e0f9881SDave Ertman 			if (event_port != prim_port)
13511e0f9881SDave Ertman 				ice_lag_move_vf_nodes(lag, prim_port,
13521e0f9881SDave Ertman 						      event_port);
13531e0f9881SDave Ertman 			lag->active_port = event_port;
13541e0f9881SDave Ertman 			return;
13551e0f9881SDave Ertman 		}
13561e0f9881SDave Ertman 
13571e0f9881SDave Ertman 		/* active port is already set and is current event port */
13581e0f9881SDave Ertman 		if (lag->active_port == event_port)
13591e0f9881SDave Ertman 			return;
13601e0f9881SDave Ertman 		/* new active port */
13611e0f9881SDave Ertman 		ice_lag_move_vf_nodes(lag, lag->active_port, event_port);
13621e0f9881SDave Ertman 		lag->active_port = event_port;
13631e0f9881SDave Ertman 	} else {
13641e0f9881SDave Ertman 		/* port is not the currently active one (e.g. a new active port
13651e0f9881SDave Ertman 		 * has already claimed the nodes and filters)
13661e0f9881SDave Ertman 		 */
13671e0f9881SDave Ertman 		if (lag->active_port != event_port)
13681e0f9881SDave Ertman 			return;
13691e0f9881SDave Ertman 		/* Neither port is active (link is down on the bond): set the
13701e0f9881SDave Ertman 		 * active port to invalid and move nodes and filters back to
13711e0f9881SDave Ertman 		 * the primary if they are not already there
13721e0f9881SDave Ertman 		 */
13731e0f9881SDave Ertman 		if (event_port != prim_port)
13741e0f9881SDave Ertman 			ice_lag_move_vf_nodes(lag, event_port, prim_port);
13751e0f9881SDave Ertman 		lag->active_port = ICE_LAG_INVALID_PORT;
13761e0f9881SDave Ertman 	}
1377ec5a6c5fSDave Ertman }
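
/*
 * Standalone model of the decision above (illustrative only): given the
 * current active port, the event port, the primary port, and whether the
 * event reports the slave as active, report which node move (if any) is
 * needed and return the new active port.  Only ICE_LAG_INVALID_PORT is
 * taken from the driver; everything else is a simplification.
 */
#if 0
static u8 example_next_active_port(u8 active, u8 event, u8 prim,
				   bool slave_active, u8 *src, u8 *dst)
{
	*src = *dst = ICE_LAG_INVALID_PORT;

	if (slave_active) {
		if (active == ICE_LAG_INVALID_PORT) {
			/* nodes start out on the primary port */
			if (event != prim) {
				*src = prim;
				*dst = event;
			}
		} else if (active != event) {
			/* a different port has taken over */
			*src = active;
			*dst = event;
		}
		return event;
	}

	/* a backup transition only matters if it hits the active port */
	if (active != event)
		return active;
	if (event != prim) {
		*src = event;
		*dst = prim;
	}
	return ICE_LAG_INVALID_PORT;
}
#endif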
1378ec5a6c5fSDave Ertman 
1379ec5a6c5fSDave Ertman /**
1380ec5a6c5fSDave Ertman  * ice_lag_chk_comp - evaluate bonded interface for feature support
1381ec5a6c5fSDave Ertman  * @lag: lag info struct
1382ec5a6c5fSDave Ertman  * @ptr: opaque data for netdev event info
1383ec5a6c5fSDave Ertman  */
1384ec5a6c5fSDave Ertman static bool
ice_lag_chk_comp(struct ice_lag * lag,void * ptr)1385ec5a6c5fSDave Ertman ice_lag_chk_comp(struct ice_lag *lag, void *ptr)
1386ec5a6c5fSDave Ertman {
13871e0f9881SDave Ertman 	struct net_device *event_netdev, *event_upper;
13881e0f9881SDave Ertman 	struct netdev_notifier_bonding_info *info;
13891e0f9881SDave Ertman 	struct netdev_bonding_info *bonding_info;
13901e0f9881SDave Ertman 	struct list_head *tmp;
1391bf65da2eSDave Ertman 	struct device *dev;
13921e0f9881SDave Ertman 	int count = 0;
13931e0f9881SDave Ertman 
13941e0f9881SDave Ertman 	if (!lag->primary)
13951e0f9881SDave Ertman 		return true;
13961e0f9881SDave Ertman 
13971e0f9881SDave Ertman 	event_netdev = netdev_notifier_info_to_dev(ptr);
13981e0f9881SDave Ertman 	rcu_read_lock();
13991e0f9881SDave Ertman 	event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
14001e0f9881SDave Ertman 	rcu_read_unlock();
14011e0f9881SDave Ertman 	if (event_upper != lag->upper_netdev)
14021e0f9881SDave Ertman 		return true;
14031e0f9881SDave Ertman 
1404bf65da2eSDave Ertman 	dev = ice_pf_to_dev(lag->pf);
1405bf65da2eSDave Ertman 
1406bf65da2eSDave Ertman 	/* Only switchdev mode is supported for SRIOV VF LAG;
1407bf65da2eSDave Ertman 	 * the primary interface has to be in switchdev mode
1408bf65da2eSDave Ertman 	 */
1409bf65da2eSDave Ertman 	if (!ice_is_switchdev_running(lag->pf)) {
1410bf65da2eSDave Ertman 		dev_info(dev, "Primary interface not in switchdev mode - VF LAG disabled\n");
1411bf65da2eSDave Ertman 		return false;
1412bf65da2eSDave Ertman 	}
1413bf65da2eSDave Ertman 
14141e0f9881SDave Ertman 	info = (struct netdev_notifier_bonding_info *)ptr;
14151e0f9881SDave Ertman 	bonding_info = &info->bonding_info;
14161e0f9881SDave Ertman 	lag->bond_mode = bonding_info->master.bond_mode;
14171e0f9881SDave Ertman 	if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP) {
1418bf65da2eSDave Ertman 		dev_info(dev, "Bond Mode not ACTIVE-BACKUP - VF LAG disabled\n");
14191e0f9881SDave Ertman 		return false;
14201e0f9881SDave Ertman 	}
14211e0f9881SDave Ertman 
14221e0f9881SDave Ertman 	list_for_each(tmp, lag->netdev_head) {
14231e0f9881SDave Ertman 		struct ice_dcbx_cfg *dcb_cfg, *peer_dcb_cfg;
14241e0f9881SDave Ertman 		struct ice_lag_netdev_list *entry;
14251e0f9881SDave Ertman 		struct ice_netdev_priv *peer_np;
14261e0f9881SDave Ertman 		struct net_device *peer_netdev;
14271e0f9881SDave Ertman 		struct ice_vsi *vsi, *peer_vsi;
1428bf65da2eSDave Ertman 		struct ice_pf *peer_pf;
14291e0f9881SDave Ertman 
14301e0f9881SDave Ertman 		entry = list_entry(tmp, struct ice_lag_netdev_list, node);
14311e0f9881SDave Ertman 		peer_netdev = entry->netdev;
14321e0f9881SDave Ertman 		if (!netif_is_ice(peer_netdev)) {
1433bf65da2eSDave Ertman 			dev_info(dev, "Found %s non-ice netdev in LAG - VF LAG disabled\n",
1434bf65da2eSDave Ertman 				 netdev_name(peer_netdev));
14351e0f9881SDave Ertman 			return false;
14361e0f9881SDave Ertman 		}
14371e0f9881SDave Ertman 
14381e0f9881SDave Ertman 		count++;
14391e0f9881SDave Ertman 		if (count > 2) {
1440bf65da2eSDave Ertman 			dev_info(dev, "Found more than two netdevs in LAG - VF LAG disabled\n");
14411e0f9881SDave Ertman 			return false;
14421e0f9881SDave Ertman 		}
14431e0f9881SDave Ertman 
14441e0f9881SDave Ertman 		peer_np = netdev_priv(peer_netdev);
14451e0f9881SDave Ertman 		vsi = ice_get_main_vsi(lag->pf);
14461e0f9881SDave Ertman 		peer_vsi = peer_np->vsi;
14471e0f9881SDave Ertman 		if (lag->pf->pdev->bus != peer_vsi->back->pdev->bus ||
14481e0f9881SDave Ertman 		    lag->pf->pdev->slot != peer_vsi->back->pdev->slot) {
1449bf65da2eSDave Ertman 			dev_info(dev, "Found %s on different device in LAG - VF LAG disabled\n",
1450bf65da2eSDave Ertman 				 netdev_name(peer_netdev));
14511e0f9881SDave Ertman 			return false;
14521e0f9881SDave Ertman 		}
14531e0f9881SDave Ertman 
14541e0f9881SDave Ertman 		dcb_cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
14551e0f9881SDave Ertman 		peer_dcb_cfg = &peer_vsi->port_info->qos_cfg.local_dcbx_cfg;
14561e0f9881SDave Ertman 		if (memcmp(dcb_cfg, peer_dcb_cfg,
14571e0f9881SDave Ertman 			   sizeof(struct ice_dcbx_cfg))) {
1458bf65da2eSDave Ertman 			dev_info(dev, "Found %s with different DCB in LAG - VF LAG disabled\n",
1459bf65da2eSDave Ertman 				 netdev_name(peer_netdev));
1460bf65da2eSDave Ertman 			return false;
1461bf65da2eSDave Ertman 		}
1462bf65da2eSDave Ertman 
1463bf65da2eSDave Ertman 		peer_pf = peer_vsi->back;
1464bf65da2eSDave Ertman 		if (test_bit(ICE_FLAG_FW_LLDP_AGENT, peer_pf->flags)) {
1465bf65da2eSDave Ertman 			dev_warn(dev, "Found %s with FW LLDP agent active - VF LAG disabled\n",
1466bf65da2eSDave Ertman 				 netdev_name(peer_netdev));
14671e0f9881SDave Ertman 			return false;
14681e0f9881SDave Ertman 		}
14691e0f9881SDave Ertman 	}
14701e0f9881SDave Ertman 
1471ec5a6c5fSDave Ertman 	return true;
1472ec5a6c5fSDave Ertman }
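
/*
 * Summary of the compatibility gates applied above; the checks run only on
 * the primary interface and only for events on its own bond.  All of the
 * following must hold for VF LAG to stay enabled:
 *   1. primary interface is in switchdev mode
 *   2. bond mode is active-backup
 *   3. every member is an ice netdev
 *   4. no more than two members
 *   5. members share the same PCI bus/slot (same physical device)
 *   6. members have identical local DCB configuration
 *   7. no member has the FW LLDP agent active
 */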
1473ec5a6c5fSDave Ertman 
1474ec5a6c5fSDave Ertman /**
1475ec5a6c5fSDave Ertman  * ice_lag_unregister - handle netdev unregister events
1476ec5a6c5fSDave Ertman  * @lag: LAG info struct
1477ec5a6c5fSDave Ertman  * @event_netdev: netdev struct for target of notifier event
1478ec5a6c5fSDave Ertman  */
1479ec5a6c5fSDave Ertman static void
ice_lag_unregister(struct ice_lag * lag,struct net_device * event_netdev)1480ec5a6c5fSDave Ertman ice_lag_unregister(struct ice_lag *lag, struct net_device *event_netdev)
1481ec5a6c5fSDave Ertman {
1482ba789fb4SDave Ertman 	struct ice_netdev_priv *np;
1483ba789fb4SDave Ertman 	struct ice_pf *event_pf;
1484ba789fb4SDave Ertman 	struct ice_lag *p_lag;
1485ba789fb4SDave Ertman 
1486ba789fb4SDave Ertman 	p_lag = ice_lag_find_primary(lag);
1487ba789fb4SDave Ertman 	np = netdev_priv(event_netdev);
1488ba789fb4SDave Ertman 	event_pf = np->vsi->back;
1489ba789fb4SDave Ertman 
1490ba789fb4SDave Ertman 	if (p_lag) {
1491ba789fb4SDave Ertman 		if (p_lag->active_port != p_lag->pf->hw.port_info->lport &&
1492ba789fb4SDave Ertman 		    p_lag->active_port != ICE_LAG_INVALID_PORT) {
1493ba789fb4SDave Ertman 			struct ice_hw *active_hw;
1494ba789fb4SDave Ertman 
1495ba789fb4SDave Ertman 			active_hw = ice_lag_find_hw_by_lport(lag,
1496ba789fb4SDave Ertman 							     p_lag->active_port);
1497ba789fb4SDave Ertman 			if (active_hw)
1498ba789fb4SDave Ertman 				ice_lag_reclaim_vf_nodes(p_lag, active_hw);
1499ba789fb4SDave Ertman 			lag->active_port = ICE_LAG_INVALID_PORT;
1500ba789fb4SDave Ertman 		}
1501ba789fb4SDave Ertman 	}
1502ba789fb4SDave Ertman 
1503ba789fb4SDave Ertman 	/* primary PF handling its own netdev unregistering */
1504ba789fb4SDave Ertman 	if (lag->primary && lag->netdev == event_netdev)
1505ba789fb4SDave Ertman 		ice_lag_primary_swid(lag, false);
1506ba789fb4SDave Ertman 
1507ba789fb4SDave Ertman 	/* primary PF handling a secondary's netdev unregistering */
1508ba789fb4SDave Ertman 	if (lag->primary && lag->netdev != event_netdev)
1509ba789fb4SDave Ertman 		ice_lag_del_prune_list(lag, event_pf);
1510ba789fb4SDave Ertman 
1511ba789fb4SDave Ertman 	/* secondary PF handling its own netdev unregistering */
1512ba789fb4SDave Ertman 	if (!lag->primary && lag->netdev == event_netdev)
1513ba789fb4SDave Ertman 		ice_lag_set_swid(0, lag, false);
1514ec5a6c5fSDave Ertman }
1515ec5a6c5fSDave Ertman 
1516ec5a6c5fSDave Ertman /**
1517ec5a6c5fSDave Ertman  * ice_lag_monitor_rdma - set and clear rdma functionality
1518ec5a6c5fSDave Ertman  * @lag: pointer to lag struct
1519ec5a6c5fSDave Ertman  * @ptr: opaque data for netdev event info
1520ec5a6c5fSDave Ertman  */
1521ec5a6c5fSDave Ertman static void
ice_lag_monitor_rdma(struct ice_lag * lag,void * ptr)1522ec5a6c5fSDave Ertman ice_lag_monitor_rdma(struct ice_lag *lag, void *ptr)
1523ec5a6c5fSDave Ertman {
1524ec5a6c5fSDave Ertman 	struct netdev_notifier_changeupper_info *info;
1525ec5a6c5fSDave Ertman 	struct net_device *netdev;
1526ec5a6c5fSDave Ertman 
1527ec5a6c5fSDave Ertman 	info = ptr;
1528ec5a6c5fSDave Ertman 	netdev = netdev_notifier_info_to_dev(ptr);
1529ec5a6c5fSDave Ertman 
1530ec5a6c5fSDave Ertman 	if (netdev != lag->netdev)
1531ec5a6c5fSDave Ertman 		return;
1532ec5a6c5fSDave Ertman 
1533df006dd4SDave Ertman 	if (info->linking)
1534ec5a6c5fSDave Ertman 		ice_clear_rdma_cap(lag->pf);
1535df006dd4SDave Ertman 	else
1536ec5a6c5fSDave Ertman 		ice_set_rdma_cap(lag->pf);
1537df006dd4SDave Ertman }
1538df006dd4SDave Ertman 
1539df006dd4SDave Ertman /**
1540bf65da2eSDave Ertman  * ice_lag_chk_disabled_bond - monitor interfaces entering/leaving disabled bond
1541bf65da2eSDave Ertman  * @lag: lag info struct
1542bf65da2eSDave Ertman  * @ptr: opaque data containing event
1543bf65da2eSDave Ertman  *
1544bf65da2eSDave Ertman  * As interfaces enter a bond, determine whether the bond is currently
1545bf65da2eSDave Ertman  * SRIOV LAG compliant and flag it if not.  As interfaces leave the
1546bf65da2eSDave Ertman  * bond, reset their compliance status.
1547bf65da2eSDave Ertman  */
ice_lag_chk_disabled_bond(struct ice_lag * lag,void * ptr)1548bf65da2eSDave Ertman static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
1549bf65da2eSDave Ertman {
1550bf65da2eSDave Ertman 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
1551bf65da2eSDave Ertman 	struct netdev_notifier_changeupper_info *info = ptr;
1552bf65da2eSDave Ertman 	struct ice_lag *prim_lag;
1553bf65da2eSDave Ertman 
1554bf65da2eSDave Ertman 	if (netdev != lag->netdev)
1555bf65da2eSDave Ertman 		return;
1556bf65da2eSDave Ertman 
1557bf65da2eSDave Ertman 	if (info->linking) {
1558bf65da2eSDave Ertman 		prim_lag = ice_lag_find_primary(lag);
1559bf65da2eSDave Ertman 		if (prim_lag &&
1560bf65da2eSDave Ertman 		    !ice_is_feature_supported(prim_lag->pf, ICE_F_SRIOV_LAG)) {
1561bf65da2eSDave Ertman 			ice_clear_feature_support(lag->pf, ICE_F_SRIOV_LAG);
1562bf65da2eSDave Ertman 			netdev_info(netdev, "Interface added to non-compliant SRIOV LAG aggregate\n");
1563bf65da2eSDave Ertman 		}
1564bf65da2eSDave Ertman 	} else {
1565bf65da2eSDave Ertman 		ice_lag_init_feature_support_flag(lag->pf);
1566bf65da2eSDave Ertman 	}
1567bf65da2eSDave Ertman }
1568bf65da2eSDave Ertman 
1569bf65da2eSDave Ertman /**
1570bf65da2eSDave Ertman  * ice_lag_disable_sriov_bond - set members of bond as not supporting SRIOV LAG
1571bf65da2eSDave Ertman  * @lag: primary interface's lag struct
1572bf65da2eSDave Ertman  */
ice_lag_disable_sriov_bond(struct ice_lag * lag)1573bf65da2eSDave Ertman static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
1574bf65da2eSDave Ertman {
1575bf65da2eSDave Ertman 	struct ice_netdev_priv *np;
1576bf65da2eSDave Ertman 	struct ice_pf *pf;
1577bf65da2eSDave Ertman 
1578cceeddd5SDave Ertman 	np = netdev_priv(lag->netdev);
1579bf65da2eSDave Ertman 	pf = np->vsi->back;
1580bf65da2eSDave Ertman 	ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
1581bf65da2eSDave Ertman }
1582bf65da2eSDave Ertman 
1583bf65da2eSDave Ertman /**
158441ccedf5SDave Ertman  * ice_lag_process_event - process a task assigned to the lag_wq
158541ccedf5SDave Ertman  * @work: pointer to work_struct
158641ccedf5SDave Ertman  */
ice_lag_process_event(struct work_struct * work)158741ccedf5SDave Ertman static void ice_lag_process_event(struct work_struct *work)
158841ccedf5SDave Ertman {
158941ccedf5SDave Ertman 	struct netdev_notifier_changeupper_info *info;
159041ccedf5SDave Ertman 	struct ice_lag_work *lag_work;
159141ccedf5SDave Ertman 	struct net_device *netdev;
159241ccedf5SDave Ertman 	struct list_head *tmp, *n;
159341ccedf5SDave Ertman 	struct ice_pf *pf;
159441ccedf5SDave Ertman 
159541ccedf5SDave Ertman 	lag_work = container_of(work, struct ice_lag_work, lag_task);
159641ccedf5SDave Ertman 	pf = lag_work->lag->pf;
159741ccedf5SDave Ertman 
159841ccedf5SDave Ertman 	mutex_lock(&pf->lag_mutex);
159941ccedf5SDave Ertman 	lag_work->lag->netdev_head = &lag_work->netdev_list.node;
160041ccedf5SDave Ertman 
160141ccedf5SDave Ertman 	switch (lag_work->event) {
160241ccedf5SDave Ertman 	case NETDEV_CHANGEUPPER:
160341ccedf5SDave Ertman 		info = &lag_work->info.changeupper_info;
1604bf65da2eSDave Ertman 		ice_lag_chk_disabled_bond(lag_work->lag, info);
1605ec5a6c5fSDave Ertman 		if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) {
1606ec5a6c5fSDave Ertman 			ice_lag_monitor_link(lag_work->lag, info);
160741ccedf5SDave Ertman 			ice_lag_changeupper_event(lag_work->lag, info);
1608ec5a6c5fSDave Ertman 			ice_lag_link_unlink(lag_work->lag, info);
1609ec5a6c5fSDave Ertman 		}
1610ec5a6c5fSDave Ertman 		ice_lag_monitor_rdma(lag_work->lag, info);
161141ccedf5SDave Ertman 		break;
161241ccedf5SDave Ertman 	case NETDEV_BONDING_INFO:
1613ec5a6c5fSDave Ertman 		if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) {
1614ec5a6c5fSDave Ertman 			if (!ice_lag_chk_comp(lag_work->lag,
1615ec5a6c5fSDave Ertman 					      &lag_work->info.bonding_info)) {
1616bf65da2eSDave Ertman 				netdev = lag_work->info.bonding_info.info.dev;
1617bf65da2eSDave Ertman 				ice_lag_disable_sriov_bond(lag_work->lag);
1618bf65da2eSDave Ertman 				ice_lag_unregister(lag_work->lag, netdev);
1619ec5a6c5fSDave Ertman 				goto lag_cleanup;
1620ec5a6c5fSDave Ertman 			}
1621ec5a6c5fSDave Ertman 			ice_lag_monitor_active(lag_work->lag,
1622ec5a6c5fSDave Ertman 					       &lag_work->info.bonding_info);
1623ec5a6c5fSDave Ertman 			ice_lag_cfg_pf_fltrs(lag_work->lag,
1624ec5a6c5fSDave Ertman 					     &lag_work->info.bonding_info);
1625ec5a6c5fSDave Ertman 		}
162641ccedf5SDave Ertman 		ice_lag_info_event(lag_work->lag, &lag_work->info.bonding_info);
162741ccedf5SDave Ertman 		break;
162841ccedf5SDave Ertman 	case NETDEV_UNREGISTER:
162941ccedf5SDave Ertman 		if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) {
163041ccedf5SDave Ertman 			netdev = lag_work->info.bonding_info.info.dev;
1631ba789fb4SDave Ertman 			if ((netdev == lag_work->lag->netdev ||
1632ba789fb4SDave Ertman 			     lag_work->lag->primary) && lag_work->lag->bonded)
163341ccedf5SDave Ertman 				ice_lag_unregister(lag_work->lag, netdev);
163441ccedf5SDave Ertman 		}
163541ccedf5SDave Ertman 		break;
163641ccedf5SDave Ertman 	default:
163741ccedf5SDave Ertman 		break;
163841ccedf5SDave Ertman 	}
163941ccedf5SDave Ertman 
1640ec5a6c5fSDave Ertman lag_cleanup:
164141ccedf5SDave Ertman 	/* cleanup resources allocated for this work item */
164241ccedf5SDave Ertman 	list_for_each_safe(tmp, n, &lag_work->netdev_list.node) {
164341ccedf5SDave Ertman 		struct ice_lag_netdev_list *entry;
164441ccedf5SDave Ertman 
164541ccedf5SDave Ertman 		entry = list_entry(tmp, struct ice_lag_netdev_list, node);
164641ccedf5SDave Ertman 		list_del(&entry->node);
164741ccedf5SDave Ertman 		kfree(entry);
164841ccedf5SDave Ertman 	}
164941ccedf5SDave Ertman 	lag_work->lag->netdev_head = NULL;
165041ccedf5SDave Ertman 
165141ccedf5SDave Ertman 	mutex_unlock(&pf->lag_mutex);
165241ccedf5SDave Ertman 
165341ccedf5SDave Ertman 	kfree(lag_work);
165441ccedf5SDave Ertman }
165541ccedf5SDave Ertman 
165641ccedf5SDave Ertman /**
1657df006dd4SDave Ertman  * ice_lag_event_handler - handle LAG events from netdev
1658df006dd4SDave Ertman  * @notif_blk: notifier block registered by this netdev
1659df006dd4SDave Ertman  * @event: event type
1660df006dd4SDave Ertman  * @ptr: opaque data containing notifier event
1661df006dd4SDave Ertman  */
1662df006dd4SDave Ertman static int
ice_lag_event_handler(struct notifier_block * notif_blk,unsigned long event,void * ptr)1663df006dd4SDave Ertman ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
1664df006dd4SDave Ertman 		      void *ptr)
1665df006dd4SDave Ertman {
1666df006dd4SDave Ertman 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
166741ccedf5SDave Ertman 	struct net_device *upper_netdev;
166841ccedf5SDave Ertman 	struct ice_lag_work *lag_work;
1669df006dd4SDave Ertman 	struct ice_lag *lag;
1670df006dd4SDave Ertman 
167141ccedf5SDave Ertman 	if (!netif_is_ice(netdev))
167241ccedf5SDave Ertman 		return NOTIFY_DONE;
1673df006dd4SDave Ertman 
167441ccedf5SDave Ertman 	if (event != NETDEV_CHANGEUPPER && event != NETDEV_BONDING_INFO &&
167541ccedf5SDave Ertman 	    event != NETDEV_UNREGISTER)
167641ccedf5SDave Ertman 		return NOTIFY_DONE;
167741ccedf5SDave Ertman 
167841ccedf5SDave Ertman 	if (!(netdev->priv_flags & IFF_BONDING))
167941ccedf5SDave Ertman 		return NOTIFY_DONE;
168041ccedf5SDave Ertman 
168141ccedf5SDave Ertman 	lag = container_of(notif_blk, struct ice_lag, notif_block);
1682df006dd4SDave Ertman 	if (!lag->netdev)
1683df006dd4SDave Ertman 		return NOTIFY_DONE;
1684df006dd4SDave Ertman 
1685df006dd4SDave Ertman 	if (!net_eq(dev_net(netdev), &init_net))
1686df006dd4SDave Ertman 		return NOTIFY_DONE;
1687df006dd4SDave Ertman 
168841ccedf5SDave Ertman 	/* This memory will be freed at the end of ice_lag_process_event */
168941ccedf5SDave Ertman 	lag_work = kzalloc(sizeof(*lag_work), GFP_KERNEL);
169041ccedf5SDave Ertman 	if (!lag_work)
169141ccedf5SDave Ertman 		return -ENOMEM;
169241ccedf5SDave Ertman 
169341ccedf5SDave Ertman 	lag_work->event_netdev = netdev;
169441ccedf5SDave Ertman 	lag_work->lag = lag;
169541ccedf5SDave Ertman 	lag_work->event = event;
169641ccedf5SDave Ertman 	if (event == NETDEV_CHANGEUPPER) {
169741ccedf5SDave Ertman 		struct netdev_notifier_changeupper_info *info;
169841ccedf5SDave Ertman 
169941ccedf5SDave Ertman 		info = ptr;
170041ccedf5SDave Ertman 		upper_netdev = info->upper_dev;
170141ccedf5SDave Ertman 	} else {
170241ccedf5SDave Ertman 		upper_netdev = netdev_master_upper_dev_get(netdev);
170341ccedf5SDave Ertman 	}
170441ccedf5SDave Ertman 
170541ccedf5SDave Ertman 	INIT_LIST_HEAD(&lag_work->netdev_list.node);
170641ccedf5SDave Ertman 	if (upper_netdev) {
170741ccedf5SDave Ertman 		struct ice_lag_netdev_list *nd_list;
170841ccedf5SDave Ertman 		struct net_device *tmp_nd;
170941ccedf5SDave Ertman 
171041ccedf5SDave Ertman 		rcu_read_lock();
171141ccedf5SDave Ertman 		for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) {
17126f8e5afeSMichal Schmidt 			nd_list = kzalloc(sizeof(*nd_list), GFP_ATOMIC);
171341ccedf5SDave Ertman 			if (!nd_list)
171441ccedf5SDave Ertman 				break;
171541ccedf5SDave Ertman 
171641ccedf5SDave Ertman 			nd_list->netdev = tmp_nd;
171741ccedf5SDave Ertman 			list_add(&nd_list->node, &lag_work->netdev_list.node);
171841ccedf5SDave Ertman 		}
171941ccedf5SDave Ertman 		rcu_read_unlock();
172041ccedf5SDave Ertman 	}
172141ccedf5SDave Ertman 
1722df006dd4SDave Ertman 	switch (event) {
1723df006dd4SDave Ertman 	case NETDEV_CHANGEUPPER:
172441ccedf5SDave Ertman 		lag_work->info.changeupper_info =
172541ccedf5SDave Ertman 			*((struct netdev_notifier_changeupper_info *)ptr);
1726df006dd4SDave Ertman 		break;
1727df006dd4SDave Ertman 	case NETDEV_BONDING_INFO:
172841ccedf5SDave Ertman 		lag_work->info.bonding_info =
172941ccedf5SDave Ertman 			*((struct netdev_notifier_bonding_info *)ptr);
17306a8b3572SDave Ertman 		break;
1731df006dd4SDave Ertman 	default:
173241ccedf5SDave Ertman 		lag_work->info.notifier_info =
173341ccedf5SDave Ertman 			*((struct netdev_notifier_info *)ptr);
1734df006dd4SDave Ertman 		break;
1735df006dd4SDave Ertman 	}
1736df006dd4SDave Ertman 
173741ccedf5SDave Ertman 	INIT_WORK(&lag_work->lag_task, ice_lag_process_event);
173841ccedf5SDave Ertman 	queue_work(ice_lag_wq, &lag_work->lag_task);
173941ccedf5SDave Ertman 
1740df006dd4SDave Ertman 	return NOTIFY_DONE;
1741df006dd4SDave Ertman }
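
/*
 * Event pipeline sketch (matches the two functions above): the notifier
 * callback filters the event, snapshots the bond's members under RCU into
 * lag_work, copies the notifier info, and queues the work on ice_lag_wq;
 * ice_lag_process_event() then runs in process context under pf->lag_mutex,
 * dispatches on the event type, and frees the snapshot and the work item.
 */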
1742df006dd4SDave Ertman 
1743df006dd4SDave Ertman /**
1744df006dd4SDave Ertman  * ice_register_lag_handler - register LAG handler on netdev
1745df006dd4SDave Ertman  * @lag: LAG struct
1746df006dd4SDave Ertman  */
ice_register_lag_handler(struct ice_lag * lag)1747df006dd4SDave Ertman static int ice_register_lag_handler(struct ice_lag *lag)
1748df006dd4SDave Ertman {
1749df006dd4SDave Ertman 	struct device *dev = ice_pf_to_dev(lag->pf);
1750df006dd4SDave Ertman 	struct notifier_block *notif_blk;
1751df006dd4SDave Ertman 
1752df006dd4SDave Ertman 	notif_blk = &lag->notif_block;
1753df006dd4SDave Ertman 
1754df006dd4SDave Ertman 	if (!notif_blk->notifier_call) {
1755df006dd4SDave Ertman 		notif_blk->notifier_call = ice_lag_event_handler;
1756df006dd4SDave Ertman 		if (register_netdevice_notifier(notif_blk)) {
1757df006dd4SDave Ertman 			notif_blk->notifier_call = NULL;
1758df006dd4SDave Ertman 			dev_err(dev, "FAIL register LAG event handler!\n");
1759df006dd4SDave Ertman 			return -EINVAL;
1760df006dd4SDave Ertman 		}
1761df006dd4SDave Ertman 		dev_dbg(dev, "LAG event handler registered\n");
1762df006dd4SDave Ertman 	}
1763df006dd4SDave Ertman 	return 0;
1764df006dd4SDave Ertman }
1765df006dd4SDave Ertman 
1766df006dd4SDave Ertman /**
1767df006dd4SDave Ertman  * ice_unregister_lag_handler - unregister LAG handler on netdev
1768df006dd4SDave Ertman  * @lag: LAG struct
1769df006dd4SDave Ertman  */
ice_unregister_lag_handler(struct ice_lag * lag)1770df006dd4SDave Ertman static void ice_unregister_lag_handler(struct ice_lag *lag)
1771df006dd4SDave Ertman {
1772df006dd4SDave Ertman 	struct device *dev = ice_pf_to_dev(lag->pf);
1773df006dd4SDave Ertman 	struct notifier_block *notif_blk;
1774df006dd4SDave Ertman 
1775df006dd4SDave Ertman 	notif_blk = &lag->notif_block;
1776df006dd4SDave Ertman 	if (notif_blk->notifier_call) {
1777df006dd4SDave Ertman 		unregister_netdevice_notifier(notif_blk);
1778df006dd4SDave Ertman 		dev_dbg(dev, "LAG event handler unregistered\n");
1779df006dd4SDave Ertman 	}
1780df006dd4SDave Ertman }
1781df006dd4SDave Ertman 
1782df006dd4SDave Ertman /**
1783ec5a6c5fSDave Ertman  * ice_create_lag_recipe - create a new recipe based on a default recipe
1784ec5a6c5fSDave Ertman  * @hw: pointer to HW struct
17851e0f9881SDave Ertman  * @rid: pointer to u16 to pass back recipe index
1786ec5a6c5fSDave Ertman  * @base_recipe: recipe to base the new recipe on
1787ec5a6c5fSDave Ertman  * @prio: priority for new recipe
1788ec5a6c5fSDave Ertman  *
1789ec5a6c5fSDave Ertman  * Returns 0 on success, negative error code on failure.
1790ec5a6c5fSDave Ertman  */
ice_create_lag_recipe(struct ice_hw * hw,u16 * rid,const u8 * base_recipe,u8 prio)17911e0f9881SDave Ertman static int ice_create_lag_recipe(struct ice_hw *hw, u16 *rid,
17921e0f9881SDave Ertman 				 const u8 *base_recipe, u8 prio)
1793ec5a6c5fSDave Ertman {
17941e0f9881SDave Ertman 	struct ice_aqc_recipe_data_elem *new_rcp;
17951e0f9881SDave Ertman 	int err;
1796ec5a6c5fSDave Ertman 
17971e0f9881SDave Ertman 	err = ice_alloc_recipe(hw, rid);
17981e0f9881SDave Ertman 	if (err)
17991e0f9881SDave Ertman 		return err;
18001e0f9881SDave Ertman 
18011e0f9881SDave Ertman 	new_rcp = kzalloc(ICE_RECIPE_LEN * ICE_MAX_NUM_RECIPES, GFP_KERNEL);
18021e0f9881SDave Ertman 	if (!new_rcp)
18031e0f9881SDave Ertman 		return -ENOMEM;
18041e0f9881SDave Ertman 
18051e0f9881SDave Ertman 	memcpy(new_rcp, base_recipe, ICE_RECIPE_LEN);
18061e0f9881SDave Ertman 	new_rcp->content.act_ctrl_fwd_priority = prio;
18071e0f9881SDave Ertman 	new_rcp->content.rid = *rid | ICE_AQ_RECIPE_ID_IS_ROOT;
18081e0f9881SDave Ertman 	new_rcp->recipe_indx = *rid;
18091e0f9881SDave Ertman 	bitmap_zero((unsigned long *)new_rcp->recipe_bitmap,
18101e0f9881SDave Ertman 		    ICE_MAX_NUM_RECIPES);
18111e0f9881SDave Ertman 	set_bit(*rid, (unsigned long *)new_rcp->recipe_bitmap);
18121e0f9881SDave Ertman 
18131e0f9881SDave Ertman 	err = ice_aq_add_recipe(hw, new_rcp, 1, NULL);
18141e0f9881SDave Ertman 	if (err)
18151e0f9881SDave Ertman 		*rid = 0;
18161e0f9881SDave Ertman 
18171e0f9881SDave Ertman 	kfree(new_rcp);
18181e0f9881SDave Ertman 	return err;
1819ec5a6c5fSDave Ertman }
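
/*
 * Usage sketch (hypothetical caller; ice_init_lag() below performs the
 * same call with ice_dflt_vsi_rcp at priority 1):
 */
#if 0
static int example_add_lag_recipe(struct ice_hw *hw)
{
	u16 rid = 0;
	int err;

	err = ice_create_lag_recipe(hw, &rid, ice_dflt_vsi_rcp, 1);
	if (!err)
		/* rid now indexes the newly added root recipe */
		pr_debug("LAG recipe %u created\n", rid);
	return err;
}
#endif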
1820ec5a6c5fSDave Ertman 
1821ec5a6c5fSDave Ertman /**
18223579aa86SDave Ertman  * ice_lag_move_vf_nodes_tc_sync - move a VF's nodes for a tc during reset
18233579aa86SDave Ertman  * @lag: primary interface's lag struct
18243579aa86SDave Ertman  * @dest_hw: HW struct for destination's interface
18253579aa86SDave Ertman  * @vsi_num: VSI index in PF space
18263579aa86SDave Ertman  * @tc: traffic class to move
18273579aa86SDave Ertman  */
18283579aa86SDave Ertman static void
ice_lag_move_vf_nodes_tc_sync(struct ice_lag * lag,struct ice_hw * dest_hw,u16 vsi_num,u8 tc)18293579aa86SDave Ertman ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
18303579aa86SDave Ertman 			      u16 vsi_num, u8 tc)
18313579aa86SDave Ertman {
18323579aa86SDave Ertman 	u16 numq, valq, buf_size, num_moved, qbuf_size;
18333579aa86SDave Ertman 	struct device *dev = ice_pf_to_dev(lag->pf);
18343579aa86SDave Ertman 	struct ice_aqc_cfg_txqs_buf *qbuf;
18353579aa86SDave Ertman 	struct ice_aqc_move_elem *buf;
18363579aa86SDave Ertman 	struct ice_sched_node *n_prt;
18373579aa86SDave Ertman 	__le32 teid, parent_teid;
18383579aa86SDave Ertman 	struct ice_vsi_ctx *ctx;
18393579aa86SDave Ertman 	struct ice_hw *hw;
18403579aa86SDave Ertman 	u32 tmp_teid;
18413579aa86SDave Ertman 
18423579aa86SDave Ertman 	hw = &lag->pf->hw;
18433579aa86SDave Ertman 	ctx = ice_get_vsi_ctx(hw, vsi_num);
18443579aa86SDave Ertman 	if (!ctx) {
18453579aa86SDave Ertman 		dev_warn(dev, "LAG rebuild failed after reset due to VSI Context failure\n");
18463579aa86SDave Ertman 		return;
18473579aa86SDave Ertman 	}
18483579aa86SDave Ertman 
18493579aa86SDave Ertman 	if (!ctx->sched.vsi_node[tc])
18503579aa86SDave Ertman 		return;
18513579aa86SDave Ertman 
18523579aa86SDave Ertman 	numq = ctx->num_lan_q_entries[tc];
18533579aa86SDave Ertman 	teid = ctx->sched.vsi_node[tc]->info.node_teid;
18543579aa86SDave Ertman 	tmp_teid = le32_to_cpu(teid);
18553579aa86SDave Ertman 	parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;
18563579aa86SDave Ertman 
18573579aa86SDave Ertman 	if (!tmp_teid || !numq)
18583579aa86SDave Ertman 		return;
18593579aa86SDave Ertman 
18603579aa86SDave Ertman 	if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, true))
18613579aa86SDave Ertman 		dev_dbg(dev, "Problem suspending traffic during reset rebuild\n");
18623579aa86SDave Ertman 
18633579aa86SDave Ertman 	/* reconfig queues for new port */
18643579aa86SDave Ertman 	qbuf_size = struct_size(qbuf, queue_info, numq);
18653579aa86SDave Ertman 	qbuf = kzalloc(qbuf_size, GFP_KERNEL);
18663579aa86SDave Ertman 	if (!qbuf) {
18673579aa86SDave Ertman 		dev_warn(dev, "Failure allocating VF queue recfg buffer for reset rebuild\n");
18683579aa86SDave Ertman 		goto resume_sync;
18693579aa86SDave Ertman 	}
18703579aa86SDave Ertman 
18713579aa86SDave Ertman 	/* add the per queue info for the reconfigure command buffer */
18723579aa86SDave Ertman 	valq = ice_lag_qbuf_recfg(hw, qbuf, vsi_num, numq, tc);
18733579aa86SDave Ertman 	if (!valq) {
18743579aa86SDave Ertman 		dev_warn(dev, "Failure to reconfig queues for LAG reset rebuild\n");
18753579aa86SDave Ertman 		goto sync_none;
18763579aa86SDave Ertman 	}
18773579aa86SDave Ertman 
18783579aa86SDave Ertman 	if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq, hw->port_info->lport,
18793579aa86SDave Ertman 			       dest_hw->port_info->lport, NULL)) {
18803579aa86SDave Ertman 		dev_warn(dev, "Failure to configure queues for LAG reset rebuild\n");
18813579aa86SDave Ertman 		goto sync_qerr;
18823579aa86SDave Ertman 	}
18833579aa86SDave Ertman 
18843579aa86SDave Ertman sync_none:
18853579aa86SDave Ertman 	kfree(qbuf);
18863579aa86SDave Ertman 
18873579aa86SDave Ertman 	/* find parent in destination tree */
18883579aa86SDave Ertman 	n_prt = ice_lag_get_sched_parent(dest_hw, tc);
18893579aa86SDave Ertman 	if (!n_prt)
18903579aa86SDave Ertman 		goto resume_sync;
18913579aa86SDave Ertman 
18923579aa86SDave Ertman 	/* Move node to new parent */
18933579aa86SDave Ertman 	buf_size = struct_size(buf, teid, 1);
18943579aa86SDave Ertman 	buf = kzalloc(buf_size, GFP_KERNEL);
18953579aa86SDave Ertman 	if (!buf) {
18963579aa86SDave Ertman 		dev_warn(dev, "Failure to alloc for VF node move in reset rebuild\n");
18973579aa86SDave Ertman 		goto resume_sync;
18983579aa86SDave Ertman 	}
18993579aa86SDave Ertman 
19003579aa86SDave Ertman 	buf->hdr.src_parent_teid = parent_teid;
19013579aa86SDave Ertman 	buf->hdr.dest_parent_teid = n_prt->info.node_teid;
19023579aa86SDave Ertman 	buf->hdr.num_elems = cpu_to_le16(1);
19033579aa86SDave Ertman 	buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
19043579aa86SDave Ertman 	buf->teid[0] = teid;
19053579aa86SDave Ertman 
19063579aa86SDave Ertman 	if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
19073579aa86SDave Ertman 				    NULL))
19083579aa86SDave Ertman 		dev_warn(dev, "Failure to move VF nodes for LAG reset rebuild\n");
19093579aa86SDave Ertman 	else
19103579aa86SDave Ertman 		ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
19113579aa86SDave Ertman 
19123579aa86SDave Ertman 	kfree(buf);
19133579aa86SDave Ertman 	goto resume_sync;
19143579aa86SDave Ertman 
19153579aa86SDave Ertman sync_qerr:
19163579aa86SDave Ertman 	kfree(qbuf);
19173579aa86SDave Ertman 
19183579aa86SDave Ertman resume_sync:
19193579aa86SDave Ertman 	if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, false))
19203579aa86SDave Ertman 		dev_warn(dev, "Problem restarting traffic for LAG node reset rebuild\n");
19213579aa86SDave Ertman }
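
/*
 * Flow of the helper above: suspend the VSI's scheduler node, re-target
 * its LAN Tx queues to the destination port, re-parent the node in the
 * destination tree with a keep-own move, then resume.  A queue-configure
 * failure (sync_qerr) skips the node move; every path ends at resume_sync
 * so traffic is never left suspended.
 */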
19223579aa86SDave Ertman 
19233579aa86SDave Ertman /**
19243579aa86SDave Ertman  * ice_lag_move_vf_nodes_sync - move vf nodes to active interface
19253579aa86SDave Ertman  * @lag: primary interface's lag struct
19263579aa86SDave Ertman  * @dest_hw: HW struct of the currently active interface
19273579aa86SDave Ertman  *
19283579aa86SDave Ertman  * This function is used in a reset context, outside of event handling,
19293579aa86SDave Ertman  * to move the VF nodes to the secondary interface when that interface
19303579aa86SDave Ertman  * is the active interface during a reset rebuild
19313579aa86SDave Ertman  */
19323579aa86SDave Ertman static void
ice_lag_move_vf_nodes_sync(struct ice_lag * lag,struct ice_hw * dest_hw)19333579aa86SDave Ertman ice_lag_move_vf_nodes_sync(struct ice_lag *lag, struct ice_hw *dest_hw)
19343579aa86SDave Ertman {
19353579aa86SDave Ertman 	struct ice_pf *pf;
19363579aa86SDave Ertman 	int i, tc;
19373579aa86SDave Ertman 
19383579aa86SDave Ertman 	if (!lag->primary || !dest_hw)
19393579aa86SDave Ertman 		return;
19403579aa86SDave Ertman 
19413579aa86SDave Ertman 	pf = lag->pf;
19423579aa86SDave Ertman 	ice_for_each_vsi(pf, i)
19433579aa86SDave Ertman 		if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
19443579aa86SDave Ertman 				   pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
19453579aa86SDave Ertman 			ice_for_each_traffic_class(tc)
19463579aa86SDave Ertman 				ice_lag_move_vf_nodes_tc_sync(lag, dest_hw, i,
19473579aa86SDave Ertman 							      tc);
19483579aa86SDave Ertman }
19493579aa86SDave Ertman 
19503579aa86SDave Ertman /**
1951df006dd4SDave Ertman  * ice_init_lag - initialize support for LAG
1952df006dd4SDave Ertman  * @pf: PF struct
1953df006dd4SDave Ertman  *
1954df006dd4SDave Ertman  * Alloc memory for LAG structs and initialize the elements.
1955df006dd4SDave Ertman  * Memory will be freed in ice_deinit_lag
1956df006dd4SDave Ertman  */
ice_init_lag(struct ice_pf * pf)1957df006dd4SDave Ertman int ice_init_lag(struct ice_pf *pf)
1958df006dd4SDave Ertman {
1959df006dd4SDave Ertman 	struct device *dev = ice_pf_to_dev(pf);
1960df006dd4SDave Ertman 	struct ice_lag *lag;
1961df006dd4SDave Ertman 	struct ice_vsi *vsi;
19621e0f9881SDave Ertman 	u64 recipe_bits = 0;
19631e0f9881SDave Ertman 	int n, err;
1964df006dd4SDave Ertman 
1965bb52f42aSDave Ertman 	ice_lag_init_feature_support_flag(pf);
1966fc4d6d13SDave Ertman 	if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
1967fc4d6d13SDave Ertman 		return 0;
1968bb52f42aSDave Ertman 
1969df006dd4SDave Ertman 	pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
1970df006dd4SDave Ertman 	if (!pf->lag)
1971df006dd4SDave Ertman 		return -ENOMEM;
1972df006dd4SDave Ertman 	lag = pf->lag;
1973df006dd4SDave Ertman 
1974df006dd4SDave Ertman 	vsi = ice_get_main_vsi(pf);
1975df006dd4SDave Ertman 	if (!vsi) {
1976df006dd4SDave Ertman 		dev_err(dev, "couldn't get main vsi, link aggregation init fail\n");
1977df006dd4SDave Ertman 		err = -EIO;
1978df006dd4SDave Ertman 		goto lag_error;
1979df006dd4SDave Ertman 	}
1980df006dd4SDave Ertman 
1981df006dd4SDave Ertman 	lag->pf = pf;
1982df006dd4SDave Ertman 	lag->netdev = vsi->netdev;
1983df006dd4SDave Ertman 	lag->role = ICE_LAG_NONE;
19841e0f9881SDave Ertman 	lag->active_port = ICE_LAG_INVALID_PORT;
1985df006dd4SDave Ertman 	lag->bonded = false;
1986df006dd4SDave Ertman 	lag->upper_netdev = NULL;
1987df006dd4SDave Ertman 	lag->notif_block.notifier_call = NULL;
1988df006dd4SDave Ertman 
1989df006dd4SDave Ertman 	err = ice_register_lag_handler(lag);
1990df006dd4SDave Ertman 	if (err) {
1991df006dd4SDave Ertman 		dev_warn(dev, "INIT LAG: Failed to register event handler\n");
1992df006dd4SDave Ertman 		goto lag_error;
1993df006dd4SDave Ertman 	}
1994df006dd4SDave Ertman 
19951e0f9881SDave Ertman 	err = ice_create_lag_recipe(&pf->hw, &lag->pf_recipe, ice_dflt_vsi_rcp,
19961e0f9881SDave Ertman 				    1);
19971e0f9881SDave Ertman 	if (err)
1998ec5a6c5fSDave Ertman 		goto lag_error;
19991e0f9881SDave Ertman 
20001e0f9881SDave Ertman 	/* associate recipes to profiles */
20011e0f9881SDave Ertman 	for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
20021e0f9881SDave Ertman 		err = ice_aq_get_recipe_to_profile(&pf->hw, n,
2003*493b2993SSteven Zou 						   &recipe_bits, NULL);
20041e0f9881SDave Ertman 		if (err)
20051e0f9881SDave Ertman 			continue;
20061e0f9881SDave Ertman 
20071e0f9881SDave Ertman 		if (recipe_bits & BIT(ICE_SW_LKUP_DFLT)) {
20081e0f9881SDave Ertman 			recipe_bits |= BIT(lag->pf_recipe);
20091e0f9881SDave Ertman 			ice_aq_map_recipe_to_profile(&pf->hw, n,
2010*493b2993SSteven Zou 						     recipe_bits, NULL);
20111e0f9881SDave Ertman 		}
2012ec5a6c5fSDave Ertman 	}
2013ec5a6c5fSDave Ertman 
2014df006dd4SDave Ertman 	ice_display_lag_info(lag);
2015df006dd4SDave Ertman 
2016df006dd4SDave Ertman 	dev_dbg(dev, "INIT LAG complete\n");
2017df006dd4SDave Ertman 	return 0;
2018df006dd4SDave Ertman 
2019df006dd4SDave Ertman lag_error:
2020df006dd4SDave Ertman 	kfree(lag);
2021df006dd4SDave Ertman 	pf->lag = NULL;
2022df006dd4SDave Ertman 	return err;
2023df006dd4SDave Ertman }
2024df006dd4SDave Ertman 
2025df006dd4SDave Ertman /**
2026df006dd4SDave Ertman  * ice_deinit_lag - Clean up LAG
2027df006dd4SDave Ertman  * @pf: PF struct
2028df006dd4SDave Ertman  *
2029df006dd4SDave Ertman  * Clean up kernel LAG info and free memory
2030df006dd4SDave Ertman  * This function is meant to only be called on driver remove/shutdown
2031df006dd4SDave Ertman  */
ice_deinit_lag(struct ice_pf * pf)2032df006dd4SDave Ertman void ice_deinit_lag(struct ice_pf *pf)
2033df006dd4SDave Ertman {
2034df006dd4SDave Ertman 	struct ice_lag *lag;
2035df006dd4SDave Ertman 
2036df006dd4SDave Ertman 	lag = pf->lag;
2037df006dd4SDave Ertman 
2038df006dd4SDave Ertman 	if (!lag)
2039df006dd4SDave Ertman 		return;
2040df006dd4SDave Ertman 
2041df006dd4SDave Ertman 	if (lag->pf)
2042df006dd4SDave Ertman 		ice_unregister_lag_handler(lag);
2043df006dd4SDave Ertman 
2044bb52f42aSDave Ertman 	flush_workqueue(ice_lag_wq);
2045df006dd4SDave Ertman 
2046ec5a6c5fSDave Ertman 	ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
2047ec5a6c5fSDave Ertman 			&pf->lag->pf_recipe);
2048ec5a6c5fSDave Ertman 
2049df006dd4SDave Ertman 	kfree(lag);
2050df006dd4SDave Ertman 
2051df006dd4SDave Ertman 	pf->lag = NULL;
2052df006dd4SDave Ertman }
20533579aa86SDave Ertman 
20543579aa86SDave Ertman /**
20553579aa86SDave Ertman  * ice_lag_rebuild - rebuild lag resources after reset
20563579aa86SDave Ertman  * @pf: pointer to local pf struct
20573579aa86SDave Ertman  *
20583579aa86SDave Ertman  * PF resets are promoted to CORER resets when the interface is in an
20593579aa86SDave Ertman  * aggregate.  This means that we need to rebuild the PF resources for the
20603579aa86SDave Ertman  * interface.  Since this happens outside the normal event processing, we
20613579aa86SDave Ertman  * need to acquire the lag lock.
20623579aa86SDave Ertman  *
20633579aa86SDave Ertman  * This function will also evaluate the VF resources if this is the primary
20643579aa86SDave Ertman  * interface.
20653579aa86SDave Ertman  */
ice_lag_rebuild(struct ice_pf * pf)20663579aa86SDave Ertman void ice_lag_rebuild(struct ice_pf *pf)
20673579aa86SDave Ertman {
20683579aa86SDave Ertman 	struct ice_lag_netdev_list ndlist;
20693579aa86SDave Ertman 	struct ice_lag *lag, *prim_lag;
20703579aa86SDave Ertman 	u8 act_port, loc_port;
20713579aa86SDave Ertman 
20723579aa86SDave Ertman 	if (!pf->lag || !pf->lag->bonded)
20733579aa86SDave Ertman 		return;
20743579aa86SDave Ertman 
20753579aa86SDave Ertman 	mutex_lock(&pf->lag_mutex);
20763579aa86SDave Ertman 
20773579aa86SDave Ertman 	lag = pf->lag;
20783579aa86SDave Ertman 	if (lag->primary) {
20793579aa86SDave Ertman 		prim_lag = lag;
20803579aa86SDave Ertman 	} else {
2081fd7f7a8aSDave Ertman 		ice_lag_build_netdev_list(lag, &ndlist);
20823579aa86SDave Ertman 		prim_lag = ice_lag_find_primary(lag);
20833579aa86SDave Ertman 	}
20843579aa86SDave Ertman 
20853579aa86SDave Ertman 	if (!prim_lag) {
20863579aa86SDave Ertman 		dev_dbg(ice_pf_to_dev(pf), "No primary interface in aggregate, can't rebuild\n");
20873579aa86SDave Ertman 		goto lag_rebuild_out;
20883579aa86SDave Ertman 	}
20893579aa86SDave Ertman 
20903579aa86SDave Ertman 	act_port = prim_lag->active_port;
20913579aa86SDave Ertman 	loc_port = lag->pf->hw.port_info->lport;
20923579aa86SDave Ertman 
20933579aa86SDave Ertman 	/* configure SWID for this port */
20943579aa86SDave Ertman 	if (lag->primary) {
20953579aa86SDave Ertman 		ice_lag_primary_swid(lag, true);
20963579aa86SDave Ertman 	} else {
20973579aa86SDave Ertman 		ice_lag_set_swid(prim_lag->pf->hw.port_info->sw_id, lag, true);
20983579aa86SDave Ertman 		ice_lag_add_prune_list(prim_lag, pf);
20993579aa86SDave Ertman 		if (act_port == loc_port)
21003579aa86SDave Ertman 			ice_lag_move_vf_nodes_sync(prim_lag, &pf->hw);
21013579aa86SDave Ertman 	}
21023579aa86SDave Ertman 
21033579aa86SDave Ertman 	ice_lag_cfg_cp_fltr(lag, true);
21043579aa86SDave Ertman 
21053579aa86SDave Ertman 	if (lag->pf_rule_id)
21063579aa86SDave Ertman 		if (ice_lag_cfg_dflt_fltr(lag, true))
21073579aa86SDave Ertman 			dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
21083579aa86SDave Ertman 
21093579aa86SDave Ertman 	ice_clear_rdma_cap(pf);
21103579aa86SDave Ertman lag_rebuild_out:
2111fd7f7a8aSDave Ertman 	ice_lag_destroy_netdev_list(lag, &ndlist);
21123579aa86SDave Ertman 	mutex_unlock(&pf->lag_mutex);
21133579aa86SDave Ertman }
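
/*
 * Rebuild order used above: re-establish SWID state (share it on the
 * primary, re-subscribe on a secondary), re-add the secondary to the
 * primary's prune list, pull VF nodes to this port if it was the active
 * one, then restore the control-packet and default-VSI filters.  The RDMA
 * capability is cleared again since the interface is still bonded.
 */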
2114776fe199SMichal Swiatkowski 
2115776fe199SMichal Swiatkowski /**
2116776fe199SMichal Swiatkowski  * ice_lag_is_switchdev_running - check for switchdev on the bond's members
2117776fe199SMichal Swiatkowski  * @pf: pointer to PF structure
2118776fe199SMichal Swiatkowski  *
2119776fe199SMichal Swiatkowski  * Check if switchdev is running on any of the interfaces connected to the lag.
2120776fe199SMichal Swiatkowski  */
ice_lag_is_switchdev_running(struct ice_pf * pf)2121776fe199SMichal Swiatkowski bool ice_lag_is_switchdev_running(struct ice_pf *pf)
2122776fe199SMichal Swiatkowski {
2123776fe199SMichal Swiatkowski 	struct ice_lag *lag = pf->lag;
2124776fe199SMichal Swiatkowski 	struct net_device *tmp_nd;
2125776fe199SMichal Swiatkowski 
2126776fe199SMichal Swiatkowski 	if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag)
2127776fe199SMichal Swiatkowski 		return false;
2128776fe199SMichal Swiatkowski 
2129776fe199SMichal Swiatkowski 	rcu_read_lock();
2130776fe199SMichal Swiatkowski 	for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
2131776fe199SMichal Swiatkowski 		struct ice_netdev_priv *priv = netdev_priv(tmp_nd);
2132776fe199SMichal Swiatkowski 
2133776fe199SMichal Swiatkowski 		if (!netif_is_ice(tmp_nd) || !priv || !priv->vsi ||
2134776fe199SMichal Swiatkowski 		    !priv->vsi->back)
2135776fe199SMichal Swiatkowski 			continue;
2136776fe199SMichal Swiatkowski 
2137776fe199SMichal Swiatkowski 		if (ice_is_switchdev_running(priv->vsi->back)) {
2138776fe199SMichal Swiatkowski 			rcu_read_unlock();
2139776fe199SMichal Swiatkowski 			return true;
2140776fe199SMichal Swiatkowski 		}
2141776fe199SMichal Swiatkowski 	}
2142776fe199SMichal Swiatkowski 	rcu_read_unlock();
2143776fe199SMichal Swiatkowski 
2144776fe199SMichal Swiatkowski 	return false;
2145776fe199SMichal Swiatkowski }
2146