// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2021, Intel Corporation. */

/* Link Aggregation code */

#include "ice.h"
#include "ice_lag.h"

/**
 * ice_lag_nop_handler - no-op Rx handler to disable LAG
 * @pskb: pointer to skb pointer
 */
rx_handler_result_t ice_lag_nop_handler(struct sk_buff __always_unused **pskb)
{
	return RX_HANDLER_PASS;
}
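
/* A minimal usage sketch, assuming the handler is installed through the core
 * rx_handler API (the actual call sites live outside this file):
 *
 *	rtnl_lock();
 *	if (!netdev_is_rx_handler_busy(netdev))
 *		err = netdev_rx_handler_register(netdev, ice_lag_nop_handler,
 *						 NULL);
 *	rtnl_unlock();
 *
 * Occupying the netdev's rx_handler slot with this pass-through handler
 * presumably keeps the bonding driver from claiming it, which is how LAG
 * support gets disabled for the port.
 */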

/**
 * ice_lag_set_primary - set PF LAG state as Primary
 * @lag: LAG info struct
 */
static void ice_lag_set_primary(struct ice_lag *lag)
{
	struct ice_pf *pf = lag->pf;

	if (!pf)
		return;

	if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_BACKUP) {
		dev_warn(ice_pf_to_dev(pf), "%s: Attempt to be Primary, but incompatible state.\n",
			 netdev_name(lag->netdev));
		return;
	}

	lag->role = ICE_LAG_PRIMARY;
}

/**
 * ice_lag_set_backup - set PF LAG state to Backup
 * @lag: LAG info struct
 */
static void ice_lag_set_backup(struct ice_lag *lag)
{
	struct ice_pf *pf = lag->pf;

	if (!pf)
		return;

	if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_PRIMARY) {
		dev_dbg(ice_pf_to_dev(pf), "%s: Attempt to be Backup, but incompatible state\n",
			netdev_name(lag->netdev));
		return;
	}

	lag->role = ICE_LAG_BACKUP;
}
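
/* Taken together, the two setters above only permit the transitions
 * UNSET/BACKUP -> PRIMARY and UNSET/PRIMARY -> BACKUP; any other request is
 * logged and ignored, so a port cannot change roles from an inconsistent
 * state.
 */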

/**
 * ice_display_lag_info - print LAG info
 * @lag: LAG info struct
 */
static void ice_display_lag_info(struct ice_lag *lag)
{
	const char *name, *peer, *upper, *role, *bonded, *primary;
	struct device *dev = &lag->pf->pdev->dev;

	name = lag->netdev ? netdev_name(lag->netdev) : "unset";
	peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset";
	upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
	primary = lag->primary ? "TRUE" : "FALSE";
	bonded = lag->bonded ? "BONDED" : "UNBONDED";

	switch (lag->role) {
	case ICE_LAG_NONE:
		role = "NONE";
		break;
	case ICE_LAG_PRIMARY:
		role = "PRIMARY";
		break;
	case ICE_LAG_BACKUP:
		role = "BACKUP";
		break;
	case ICE_LAG_UNSET:
		role = "UNSET";
		break;
	default:
		role = "ERROR";
	}

	dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, primary:%s\n", name,
		bonded, peer, upper, role, primary);
}

/**
 * ice_lag_info_event - handle NETDEV_BONDING_INFO event
 * @lag: LAG info struct
 * @ptr: opaque data pointer
 *
 * ptr is to be cast to (netdev_notifier_bonding_info *)
 */
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
{
	struct netdev_notifier_bonding_info *info;
	struct netdev_bonding_info *bonding_info;
	struct net_device *event_netdev;
	const char *lag_netdev_name;

	event_netdev = netdev_notifier_info_to_dev(ptr);
	info = ptr;
	lag_netdev_name = netdev_name(lag->netdev);
	bonding_info = &info->bonding_info;

	if (event_netdev != lag->netdev || !lag->bonded || !lag->upper_netdev)
		return;

	if (bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) {
		netdev_dbg(lag->netdev, "Bonding event recv, but mode not active/backup\n");
		goto lag_out;
	}

	if (strcmp(bonding_info->slave.slave_name, lag_netdev_name)) {
		netdev_dbg(lag->netdev, "Bonding event recv, but secondary info not for us\n");
		goto lag_out;
	}

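	/* slave.state follows the bonding UAPI: BOND_STATE_ACTIVE (0) means
	 * this port is carrying traffic, while a non-zero value
	 * (BOND_STATE_BACKUP) means it is the standby port.
	 */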
	if (bonding_info->slave.state)
		ice_lag_set_backup(lag);
	else
		ice_lag_set_primary(lag);

lag_out:
	ice_display_lag_info(lag);
}

/**
 * ice_lag_link - handle LAG link event
 * @lag: LAG info struct
 * @info: info from the netdev notifier
 */
static void
ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
{
	struct net_device *netdev_tmp, *upper = info->upper_dev;
	struct ice_pf *pf = lag->pf;
	int peers = 0;

	if (lag->bonded)
		dev_warn(ice_pf_to_dev(pf), "%s Already part of a bond\n",
			 netdev_name(lag->netdev));

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, netdev_tmp)
		peers++;
	rcu_read_unlock();

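	/* Hold a reference on the bond netdev for as long as it is tracked as
	 * our upper; the reference is dropped again in ice_lag_unlink(),
	 * ice_lag_unregister() and ice_deinit_lag().
	 */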
	if (lag->upper_netdev != upper) {
		dev_hold(upper);
		lag->upper_netdev = upper;
	}

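	/* SR-IOV and RDMA are not supported on a bonded port, so drop those
	 * capability bits while bonded; they are restored on unlink/unregister.
	 */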
	ice_clear_sriov_cap(pf);
	ice_clear_rdma_cap(pf);

	lag->bonded = true;
	lag->role = ICE_LAG_UNSET;

	/* if this is the first element in a LAG, mark it as primary */
	lag->primary = (peers == 1);
}

/**
 * ice_lag_unlink - handle unlink event
 * @lag: LAG info struct
 * @info: info from netdev notification
 */
static void
ice_lag_unlink(struct ice_lag *lag,
	       struct netdev_notifier_changeupper_info *info)
{
	struct net_device *netdev_tmp, *upper = info->upper_dev;
	struct ice_pf *pf = lag->pf;
	bool found = false;

	if (!lag->bonded) {
		netdev_dbg(lag->netdev, "bonding unlink event on non-LAG netdev\n");
		return;
	}

	/* determine if we are in the new LAG config or not */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, netdev_tmp) {
		if (netdev_tmp == lag->netdev) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

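	/* Still listed as a member of the bond, so this unlink event was for
	 * a different port; leave our LAG state alone.
	 */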
	if (found)
		return;

	if (lag->upper_netdev) {
		dev_put(lag->upper_netdev);
		lag->upper_netdev = NULL;
	}

	lag->peer_netdev = NULL;
	ice_set_sriov_cap(pf);
	ice_set_rdma_cap(pf);
	lag->bonded = false;
	lag->role = ICE_LAG_NONE;
}

/**
 * ice_lag_unregister - handle netdev unregister events
 * @lag: LAG info struct
 * @netdev: netdev reporting the event
 */
static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
{
	struct ice_pf *pf = lag->pf;

	/* check that this event is for our netdev and that we are actually
	 * part of an aggregate
	 */
	if (netdev != lag->netdev || !lag->bonded)
		return;

	if (lag->upper_netdev) {
		dev_put(lag->upper_netdev);
		lag->upper_netdev = NULL;
		ice_set_sriov_cap(pf);
		ice_set_rdma_cap(pf);
	}
	/* perform some cleanup in case we come back */
	lag->bonded = false;
	lag->role = ICE_LAG_NONE;
}

/**
 * ice_lag_changeupper_event - handle LAG changeupper event
 * @lag: LAG info struct
 * @ptr: opaque pointer data
 *
 * ptr is to be cast into netdev_notifier_changeupper_info
 */
static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *netdev;

	info = ptr;
	netdev = netdev_notifier_info_to_dev(ptr);

	/* not for this netdev */
	if (netdev != lag->netdev)
		return;

	if (!info->upper_dev) {
		netdev_dbg(netdev, "changeupper rcvd, but no upper defined\n");
		return;
	}

	netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK");

	if (!netif_is_lag_master(info->upper_dev)) {
		netdev_dbg(netdev, "changeupper rcvd, but not primary. bail\n");
		return;
	}

	if (info->linking)
		ice_lag_link(lag, info);
	else
		ice_lag_unlink(lag, info);

	ice_display_lag_info(lag);
}

/**
 * ice_lag_changelower_event - handle LAG changelower event
 * @lag: LAG info struct
 * @ptr: opaque data pointer
 *
 * ptr to be cast to netdev_notifier_changelowerstate_info
 */
static void ice_lag_changelower_event(struct ice_lag *lag, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	if (netdev != lag->netdev)
		return;

	netdev_dbg(netdev, "bonding info\n");

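	/* Lower-state changes are only logged here; PRIMARY/BACKUP transitions
	 * are driven by NETDEV_BONDING_INFO in ice_lag_info_event().
	 */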
	if (!netif_is_lag_port(netdev))
		netdev_dbg(netdev, "CHANGELOWER rcvd, but netdev not in LAG. Bail\n");
}

/**
 * ice_lag_event_handler - handle LAG events from netdev
 * @notif_blk: notifier block registered by this netdev
 * @event: event type
 * @ptr: opaque data containing notifier event
 */
static int
ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
		      void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct ice_lag *lag;

	lag = container_of(notif_blk, struct ice_lag, notif_block);

	if (!lag->netdev)
		return NOTIFY_DONE;

	/* Only react to events for netdevs in the initial network namespace */
	if (!net_eq(dev_net(netdev), &init_net))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		ice_lag_changeupper_event(lag, ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		ice_lag_changelower_event(lag, ptr);
		break;
	case NETDEV_BONDING_INFO:
		ice_lag_info_event(lag, ptr);
		break;
	case NETDEV_UNREGISTER:
		ice_lag_unregister(lag, netdev);
		break;
	default:
		break;
	}

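	/* Always return NOTIFY_DONE so the notifier chain continues and other
	 * subscribers still see the event.
	 */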
	return NOTIFY_DONE;
}

/**
 * ice_register_lag_handler - register LAG handler on netdev
 * @lag: LAG struct
 */
static int ice_register_lag_handler(struct ice_lag *lag)
{
	struct device *dev = ice_pf_to_dev(lag->pf);
	struct notifier_block *notif_blk;

	notif_blk = &lag->notif_block;

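	/* notifier_call doubles as the "already registered" flag: it is left
	 * NULL by ice_init_lag(), set before registration is attempted and
	 * cleared again on failure, so a repeat call is a no-op once the
	 * handler is registered.
	 */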
	if (!notif_blk->notifier_call) {
		notif_blk->notifier_call = ice_lag_event_handler;
		if (register_netdevice_notifier(notif_blk)) {
			notif_blk->notifier_call = NULL;
			dev_err(dev, "FAIL register LAG event handler!\n");
			return -EINVAL;
		}
		dev_dbg(dev, "LAG event handler registered\n");
	}
	return 0;
}

/**
 * ice_unregister_lag_handler - unregister LAG handler on netdev
 * @lag: LAG struct
 */
static void ice_unregister_lag_handler(struct ice_lag *lag)
{
	struct device *dev = ice_pf_to_dev(lag->pf);
	struct notifier_block *notif_blk;

	notif_blk = &lag->notif_block;
	if (notif_blk->notifier_call) {
		unregister_netdevice_notifier(notif_blk);
		dev_dbg(dev, "LAG event handler unregistered\n");
	}
}

/**
 * ice_init_lag - initialize support for LAG
 * @pf: PF struct
 *
 * Allocate memory for the LAG struct and initialize its members.
 * The memory is freed in ice_deinit_lag().
 */
int ice_init_lag(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_lag *lag;
	struct ice_vsi *vsi;
	int err;

	pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
	if (!pf->lag)
		return -ENOMEM;
	lag = pf->lag;

	vsi = ice_get_main_vsi(pf);
	if (!vsi) {
		dev_err(dev, "couldn't get main VSI, link aggregation init failed\n");
		err = -EIO;
		goto lag_error;
	}

	lag->pf = pf;
	lag->netdev = vsi->netdev;
	lag->role = ICE_LAG_NONE;
	lag->bonded = false;
	lag->peer_netdev = NULL;
	lag->upper_netdev = NULL;
	lag->notif_block.notifier_call = NULL;

	err = ice_register_lag_handler(lag);
	if (err) {
		dev_warn(dev, "INIT LAG: Failed to register event handler\n");
		goto lag_error;
	}

	ice_display_lag_info(lag);

	dev_dbg(dev, "INIT LAG complete\n");
	return 0;

lag_error:
	kfree(lag);
	pf->lag = NULL;
	return err;
}

/**
 * ice_deinit_lag - Clean up LAG
 * @pf: PF struct
 *
 * Clean up kernel LAG info and free memory
 * This function is meant to only be called on driver remove/shutdown
 */
void ice_deinit_lag(struct ice_pf *pf)
{
	struct ice_lag *lag;

	lag = pf->lag;

	if (!lag)
		return;

	if (lag->pf)
		ice_unregister_lag_handler(lag);

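	/* dev_put() tolerates a NULL pointer on the kernels this code targets
	 * (it checks the netdev before dropping the reference), so
	 * upper_netdev and peer_netdev need no explicit NULL checks here.
	 */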
	dev_put(lag->upper_netdev);

	dev_put(lag->peer_netdev);

	kfree(lag);

	pf->lag = NULL;
}