1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2018-2021, Intel Corporation. */
3 
4 /* Link Aggregation code */
5 
6 #include "ice.h"
7 #include "ice_lag.h"
8 
/**
 * ice_lag_nop_handler - no-op Rx handler to disable LAG
 * @pskb: pointer to skb pointer
 *
 * Always returns RX_HANDLER_PASS so the networking stack continues normal
 * processing of the frame; the skb is never inspected or modified.
 */
rx_handler_result_t ice_lag_nop_handler(struct sk_buff __always_unused **pskb)
{
	return RX_HANDLER_PASS;
}
17 
18 /**
19  * ice_lag_set_primary - set PF LAG state as Primary
20  * @lag: LAG info struct
21  */
22 static void ice_lag_set_primary(struct ice_lag *lag)
23 {
24 	struct ice_pf *pf = lag->pf;
25 
26 	if (!pf)
27 		return;
28 
29 	if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_BACKUP) {
30 		dev_warn(ice_pf_to_dev(pf), "%s: Attempt to be Primary, but incompatible state.\n",
31 			 netdev_name(lag->netdev));
32 		return;
33 	}
34 
35 	lag->role = ICE_LAG_PRIMARY;
36 }
37 
38 /**
39  * ice_lag_set_backup - set PF LAG state to Backup
40  * @lag: LAG info struct
41  */
42 static void ice_lag_set_backup(struct ice_lag *lag)
43 {
44 	struct ice_pf *pf = lag->pf;
45 
46 	if (!pf)
47 		return;
48 
49 	if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_PRIMARY) {
50 		dev_dbg(ice_pf_to_dev(pf), "%s: Attempt to be Backup, but incompatible state\n",
51 			netdev_name(lag->netdev));
52 		return;
53 	}
54 
55 	lag->role = ICE_LAG_BACKUP;
56 }
57 
58 /**
59  * ice_display_lag_info - print LAG info
60  * @lag: LAG info struct
61  */
62 static void ice_display_lag_info(struct ice_lag *lag)
63 {
64 	const char *name, *peer, *upper, *role, *bonded, *master;
65 	struct device *dev = &lag->pf->pdev->dev;
66 
67 	name = lag->netdev ? netdev_name(lag->netdev) : "unset";
68 	peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset";
69 	upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
70 	master = lag->master ? "TRUE" : "FALSE";
71 	bonded = lag->bonded ? "BONDED" : "UNBONDED";
72 
73 	switch (lag->role) {
74 	case ICE_LAG_NONE:
75 		role = "NONE";
76 		break;
77 	case ICE_LAG_PRIMARY:
78 		role = "PRIMARY";
79 		break;
80 	case ICE_LAG_BACKUP:
81 		role = "BACKUP";
82 		break;
83 	case ICE_LAG_UNSET:
84 		role = "UNSET";
85 		break;
86 	default:
87 		role = "ERROR";
88 	}
89 
90 	dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, master:%s\n", name,
91 		bonded, peer, upper, role, master);
92 }
93 
/**
 * ice_lag_info_event - handle NETDEV_BONDING_INFO event
 * @lag: LAG info struct
 * @ptr: opaque data pointer
 *
 * ptr is to be cast to (netdev_notifier_bonding_info *)
 *
 * Updates this PF's LAG role (Primary/Backup) from the bonding info in the
 * notification and caches a held reference to the other ice netdev found in
 * the same bond as our peer.
 */
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
{
	struct net_device *event_netdev, *netdev_tmp;
	struct netdev_notifier_bonding_info *info;
	struct netdev_bonding_info *bonding_info;
	const char *lag_netdev_name;

	event_netdev = netdev_notifier_info_to_dev(ptr);
	info = ptr;
	lag_netdev_name = netdev_name(lag->netdev);
	bonding_info = &info->bonding_info;

	/* only react to events for our own netdev while it is in a bond */
	if (event_netdev != lag->netdev || !lag->bonded || !lag->upper_netdev)
		return;

	/* Primary/Backup roles only make sense in active-backup mode */
	if (bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) {
		netdev_dbg(lag->netdev, "Bonding event recv, but mode not active/backup\n");
		goto lag_out;
	}

	/* the slave info in the notification must describe this port */
	if (strcmp(bonding_info->slave.slave_name, lag_netdev_name)) {
		netdev_dbg(lag->netdev, "Bonding event recv, but slave info not for us\n");
		goto lag_out;
	}

	/* Scan the bond for another ice netdev and record it as our peer.
	 * NOTE(review): a dev_hold() is taken for each newly seen peer but no
	 * dev_put() is done here for a previously cached peer_netdev — verify
	 * the reference is always released on unlink before the peer changes.
	 */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(lag->upper_netdev, netdev_tmp) {
		if (!netif_is_ice(netdev_tmp))
			continue;

		if (netdev_tmp && netdev_tmp != lag->netdev &&
		    lag->peer_netdev != netdev_tmp) {
			dev_hold(netdev_tmp);
			lag->peer_netdev = netdev_tmp;
		}
	}
	rcu_read_unlock();

	/* non-zero slave.state means this port is the inactive (backup) slave */
	if (bonding_info->slave.state)
		ice_lag_set_backup(lag);
	else
		ice_lag_set_primary(lag);

lag_out:
	ice_display_lag_info(lag);
}
147 
148 /**
149  * ice_lag_link - handle LAG link event
150  * @lag: LAG info struct
151  * @info: info from the netdev notifier
152  */
153 static void
154 ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
155 {
156 	struct net_device *netdev_tmp, *upper = info->upper_dev;
157 	struct ice_pf *pf = lag->pf;
158 	int peers = 0;
159 
160 	if (lag->bonded)
161 		dev_warn(ice_pf_to_dev(pf), "%s Already part of a bond\n",
162 			 netdev_name(lag->netdev));
163 
164 	rcu_read_lock();
165 	for_each_netdev_in_bond_rcu(upper, netdev_tmp)
166 		peers++;
167 	rcu_read_unlock();
168 
169 	if (lag->upper_netdev != upper) {
170 		dev_hold(upper);
171 		lag->upper_netdev = upper;
172 	}
173 
174 	ice_clear_sriov_cap(pf);
175 
176 	lag->bonded = true;
177 	lag->role = ICE_LAG_UNSET;
178 
179 	/* if this is the first element in an LAG mark as master */
180 	lag->master = !!(peers == 1);
181 }
182 
/**
 * ice_lag_unlink - handle unlink event
 * @lag: LAG info struct
 * @info: info from netdev notification
 *
 * If our netdev is no longer a member of the reported upper device, drop the
 * held upper/peer references, restore SR-IOV capability, and reset LAG state.
 */
static void
ice_lag_unlink(struct ice_lag *lag,
	       struct netdev_notifier_changeupper_info *info)
{
	struct net_device *netdev_tmp, *upper = info->upper_dev;
	struct ice_pf *pf = lag->pf;
	bool found = false;

	if (!lag->bonded) {
		netdev_dbg(lag->netdev, "bonding unlink event on non-LAG netdev\n");
		return;
	}

	/* determine if we are in the new LAG config or not */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, netdev_tmp) {
		if (netdev_tmp == lag->netdev) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	/* still a member of the upper device: nothing to tear down */
	if (found)
		return;

	/* release the references taken in ice_lag_link()/ice_lag_info_event() */
	if (lag->upper_netdev) {
		dev_put(lag->upper_netdev);
		lag->upper_netdev = NULL;
	}

	if (lag->peer_netdev) {
		dev_put(lag->peer_netdev);
		lag->peer_netdev = NULL;
	}

	/* leaving the bond: SR-IOV may be enabled again */
	ice_set_sriov_cap(pf);
	lag->bonded = false;
	lag->role = ICE_LAG_NONE;
}
228 
229 /**
230  * ice_lag_changeupper_event - handle LAG changeupper event
231  * @lag: LAG info struct
232  * @ptr: opaque pointer data
233  *
234  * ptr is to be cast into netdev_notifier_changeupper_info
235  */
236 static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
237 {
238 	struct netdev_notifier_changeupper_info *info;
239 	struct net_device *netdev;
240 
241 	info = ptr;
242 	netdev = netdev_notifier_info_to_dev(ptr);
243 
244 	/* not for this netdev */
245 	if (netdev != lag->netdev)
246 		return;
247 
248 	if (!info->upper_dev) {
249 		netdev_dbg(netdev, "changeupper rcvd, but no upper defined\n");
250 		return;
251 	}
252 
253 	netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK");
254 
255 	if (!netif_is_lag_master(info->upper_dev)) {
256 		netdev_dbg(netdev, "changeupper rcvd, but not master. bail\n");
257 		return;
258 	}
259 
260 	if (info->linking)
261 		ice_lag_link(lag, info);
262 	else
263 		ice_lag_unlink(lag, info);
264 
265 	ice_display_lag_info(lag);
266 }
267 
268 /**
269  * ice_lag_changelower_event - handle LAG changelower event
270  * @lag: LAG info struct
271  * @ptr: opaque data pointer
272  *
273  * ptr to be cast to netdev_notifier_changelowerstate_info
274  */
275 static void ice_lag_changelower_event(struct ice_lag *lag, void *ptr)
276 {
277 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
278 
279 	if (netdev != lag->netdev)
280 		return;
281 
282 	netdev_dbg(netdev, "bonding info\n");
283 
284 	if (!netif_is_lag_port(netdev))
285 		netdev_dbg(netdev, "CHANGELOWER rcvd, but netdev not in LAG. Bail\n");
286 }
287 
288 /**
289  * ice_lag_event_handler - handle LAG events from netdev
290  * @notif_blk: notifier block registered by this netdev
291  * @event: event type
292  * @ptr: opaque data containing notifier event
293  */
294 static int
295 ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
296 		      void *ptr)
297 {
298 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
299 	struct ice_lag *lag;
300 
301 	lag = container_of(notif_blk, struct ice_lag, notif_block);
302 
303 	if (!lag->netdev)
304 		return NOTIFY_DONE;
305 
306 	/* Check that the netdev is in the working namespace */
307 	if (!net_eq(dev_net(netdev), &init_net))
308 		return NOTIFY_DONE;
309 
310 	switch (event) {
311 	case NETDEV_CHANGEUPPER:
312 		ice_lag_changeupper_event(lag, ptr);
313 		break;
314 	case NETDEV_CHANGELOWERSTATE:
315 		ice_lag_changelower_event(lag, ptr);
316 		break;
317 	case NETDEV_BONDING_INFO:
318 		ice_lag_info_event(lag, ptr);
319 		break;
320 	default:
321 		break;
322 	}
323 
324 	return NOTIFY_DONE;
325 }
326 
327 /**
328  * ice_register_lag_handler - register LAG handler on netdev
329  * @lag: LAG struct
330  */
331 static int ice_register_lag_handler(struct ice_lag *lag)
332 {
333 	struct device *dev = ice_pf_to_dev(lag->pf);
334 	struct notifier_block *notif_blk;
335 
336 	notif_blk = &lag->notif_block;
337 
338 	if (!notif_blk->notifier_call) {
339 		notif_blk->notifier_call = ice_lag_event_handler;
340 		if (register_netdevice_notifier(notif_blk)) {
341 			notif_blk->notifier_call = NULL;
342 			dev_err(dev, "FAIL register LAG event handler!\n");
343 			return -EINVAL;
344 		}
345 		dev_dbg(dev, "LAG event handler registered\n");
346 	}
347 	return 0;
348 }
349 
350 /**
351  * ice_unregister_lag_handler - unregister LAG handler on netdev
352  * @lag: LAG struct
353  */
354 static void ice_unregister_lag_handler(struct ice_lag *lag)
355 {
356 	struct device *dev = ice_pf_to_dev(lag->pf);
357 	struct notifier_block *notif_blk;
358 
359 	notif_blk = &lag->notif_block;
360 	if (notif_blk->notifier_call) {
361 		unregister_netdevice_notifier(notif_blk);
362 		dev_dbg(dev, "LAG event handler unregistered\n");
363 	}
364 }
365 
366 /**
367  * ice_init_lag - initialize support for LAG
368  * @pf: PF struct
369  *
370  * Alloc memory for LAG structs and initialize the elements.
371  * Memory will be freed in ice_deinit_lag
372  */
373 int ice_init_lag(struct ice_pf *pf)
374 {
375 	struct device *dev = ice_pf_to_dev(pf);
376 	struct ice_lag *lag;
377 	struct ice_vsi *vsi;
378 	int err;
379 
380 	pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
381 	if (!pf->lag)
382 		return -ENOMEM;
383 	lag = pf->lag;
384 
385 	vsi = ice_get_main_vsi(pf);
386 	if (!vsi) {
387 		dev_err(dev, "couldn't get main vsi, link aggregation init fail\n");
388 		err = -EIO;
389 		goto lag_error;
390 	}
391 
392 	lag->pf = pf;
393 	lag->netdev = vsi->netdev;
394 	lag->role = ICE_LAG_NONE;
395 	lag->bonded = false;
396 	lag->peer_netdev = NULL;
397 	lag->upper_netdev = NULL;
398 	lag->notif_block.notifier_call = NULL;
399 
400 	err = ice_register_lag_handler(lag);
401 	if (err) {
402 		dev_warn(dev, "INIT LAG: Failed to register event handler\n");
403 		goto lag_error;
404 	}
405 
406 	ice_display_lag_info(lag);
407 
408 	dev_dbg(dev, "INIT LAG complete\n");
409 	return 0;
410 
411 lag_error:
412 	kfree(lag);
413 	pf->lag = NULL;
414 	return err;
415 }
416 
417 /**
418  * ice_deinit_lag - Clean up LAG
419  * @pf: PF struct
420  *
421  * Clean up kernel LAG info and free memory
422  * This function is meant to only be called on driver remove/shutdown
423  */
424 void ice_deinit_lag(struct ice_pf *pf)
425 {
426 	struct ice_lag *lag;
427 
428 	lag = pf->lag;
429 
430 	if (!lag)
431 		return;
432 
433 	if (lag->pf)
434 		ice_unregister_lag_handler(lag);
435 
436 	if (lag->upper_netdev)
437 		dev_put(lag->upper_netdev);
438 
439 	if (lag->peer_netdev)
440 		dev_put(lag->peer_netdev);
441 
442 	kfree(lag);
443 
444 	pf->lag = NULL;
445 }
446