// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <linux/rhashtable.h>

#include "prestera.h"
#include "prestera_router_hw.h"

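/* Key of a cached kernel FIB entry: destination prefix plus the table id
 * exactly as reported by the kernel (normalized via prestera_fix_tb_id()
 * only when programming hardware).
 */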
struct prestera_kern_fib_cache_key {
	struct prestera_ip_addr addr;
	u32 prefix_len;
	u32 kern_tb_id; /* tb_id from kernel (not fixed) */
};

/* Cached kernel FIB entry and the LPM info prepared from it for hardware */
struct prestera_kern_fib_cache {
	struct prestera_kern_fib_cache_key key;
	struct {
		struct prestera_fib_key fib_key;
		enum prestera_fib_type fib_type;
	} lpm_info; /* hold prepared lpm info */
	struct rhash_head ht_node; /* node of prestera_router */
	struct fib_info *fi;
	u8 kern_tos;
	u8 kern_type;
	bool reachable; /* true if route is not overlapped by another table */
};

static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = {
	.key_offset  = offsetof(struct prestera_kern_fib_cache, key),
	.head_offset = offsetof(struct prestera_kern_fib_cache, ht_node),
	.key_len     = sizeof(struct prestera_kern_fib_cache_key),
	.automatic_shrinking = true,
};

/* Convert a kernel table id to the table id used for the hardware VR:
 * the unspec, local and default tables are folded into the main table.
 */
static u32 prestera_fix_tb_id(u32 tb_id)
{
	if (tb_id == RT_TABLE_UNSPEC ||
	    tb_id == RT_TABLE_LOCAL ||
	    tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;

	return tb_id;
}

static void
prestera_util_fen_info2fib_cache_key(struct fib_entry_notifier_info *fen_info,
				     struct prestera_kern_fib_cache_key *key)
{
	memset(key, 0, sizeof(*key));
	key->addr.u.ipv4 = cpu_to_be32(fen_info->dst);
	key->prefix_len = fen_info->dst_len;
	key->kern_tb_id = fen_info->tb_id;
}

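/* Look up a cached kernel FIB entry by its key (prefix + kernel table id) */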
static struct prestera_kern_fib_cache *
prestera_kern_fib_cache_find(struct prestera_switch *sw,
			     struct prestera_kern_fib_cache_key *key)
{
	struct prestera_kern_fib_cache *fib_cache;

	fib_cache = rhashtable_lookup_fast(&sw->router->kern_fib_cache_ht, key,
					   __prestera_kern_fib_cache_ht_params);
	return fib_cache;
}

static void
prestera_kern_fib_cache_destroy(struct prestera_switch *sw,
				struct prestera_kern_fib_cache *fib_cache)
{
	fib_info_put(fib_cache->fi);
	rhashtable_remove_fast(&sw->router->kern_fib_cache_ht,
			       &fib_cache->ht_node,
			       __prestera_kern_fib_cache_ht_params);
	kfree(fib_cache);
}

/* Operations on fi (offload, etc) must be wrapped in utils.
 * This function just creates storage.
 */
static struct prestera_kern_fib_cache *
prestera_kern_fib_cache_create(struct prestera_switch *sw,
			       struct prestera_kern_fib_cache_key *key,
			       struct fib_info *fi, u8 tos, u8 type)
{
	struct prestera_kern_fib_cache *fib_cache;
	int err;

	fib_cache = kzalloc(sizeof(*fib_cache), GFP_KERNEL);
	if (!fib_cache)
		goto err_kzalloc;

	memcpy(&fib_cache->key, key, sizeof(*key));
	fib_info_hold(fi);
	fib_cache->fi = fi;
	fib_cache->kern_tos = tos;
	fib_cache->kern_type = type;

	err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht,
				     &fib_cache->ht_node,
				     __prestera_kern_fib_cache_ht_params);
	if (err)
		goto err_ht_insert;

	return fib_cache;

err_ht_insert:
	fib_info_put(fi);
	kfree(fib_cache);
err_kzalloc:
	return NULL;
}

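/* Reflect the hardware state of an IPv4 route back to the kernel by setting
 * the offload/trap/offload_failed flags on the matching fib alias.
 */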
static void
__prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw,
				     struct prestera_kern_fib_cache *fc,
				     bool fail, bool offload, bool trap)
{
	struct fib_rt_info fri;

	if (fc->key.addr.v != PRESTERA_IPV4)
		return;

	fri.fi = fc->fi;
	fri.tb_id = fc->key.kern_tb_id;
	fri.dst = fc->key.addr.u.ipv4;
	fri.dst_len = fc->key.prefix_len;
	fri.tos = fc->kern_tos;
	fri.type = fc->kern_type;
	/* flags begin */
	fri.offload = offload;
	fri.trap = trap;
	fri.offload_failed = fail;
	/* flags end */
	fib_alias_hw_flags_set(&init_net, &fri);
}

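/* Translate the cached kernel route into the LPM parameters (fib key and
 * action type) that will be programmed into hardware.
 */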
static int
__prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw,
				     struct prestera_kern_fib_cache *fc)
{
	memset(&fc->lpm_info, 0, sizeof(fc->lpm_info));

	switch (fc->fi->fib_type) {
	case RTN_UNICAST:
		fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
		break;
	/* Unsupported. Leave it for kernel: */
	case RTN_BROADCAST:
	case RTN_MULTICAST:
	/* Routes we must trap by design: */
	case RTN_LOCAL:
	case RTN_UNREACHABLE:
	case RTN_PROHIBIT:
		fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
		break;
	case RTN_BLACKHOLE:
		fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_DROP;
		break;
	default:
		dev_err(sw->dev->dev, "Unsupported fib_type");
		return -EOPNOTSUPP;
	}

	fc->lpm_info.fib_key.addr = fc->key.addr;
	fc->lpm_info.fib_key.prefix_len = fc->key.prefix_len;
	fc->lpm_info.fib_key.tb_id = prestera_fix_tb_id(fc->key.kern_tb_id);

	return 0;
}

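/* Program (or remove) the LPM entry for this cache record in hardware: any
 * existing fib node for the key is destroyed first, and a new one is created
 * only when the route is enabled.
 */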
static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw,
				      struct prestera_kern_fib_cache *fc,
				      bool enabled)
{
	struct prestera_fib_node *fib_node;

	fib_node = prestera_fib_node_find(sw, &fc->lpm_info.fib_key);
	if (fib_node)
		prestera_fib_node_destroy(sw, fib_node);

	if (!enabled)
		return 0;

	fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key,
					    fc->lpm_info.fib_type);

	if (!fib_node) {
		dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d",
			&fc->key.addr.u.ipv4, fc->key.prefix_len,
			fc->key.kern_tb_id);
		return -ENOENT;
	}

	return 0;
}

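/* Recompute the LPM info for a cache record, push it to hardware and report
 * the resulting offload/trap state back to the kernel.
 */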
static int __prestera_k_arb_fc_apply(struct prestera_switch *sw,
				     struct prestera_kern_fib_cache *fc)
{
	int err;

	err = __prestera_pr_k_arb_fc_lpm_info_calc(sw, fc);
	if (err)
		return err;

	err = __prestera_k_arb_f_lpm_set(sw, fc, fc->reachable);
	if (err) {
		__prestera_k_arb_fib_lpm_offload_set(sw, fc,
						     true, false, false);
		return err;
	}

	switch (fc->lpm_info.fib_type) {
	case PRESTERA_FIB_TYPE_TRAP:
		__prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
						     false, fc->reachable);
		break;
	case PRESTERA_FIB_TYPE_DROP:
		__prestera_k_arb_fib_lpm_offload_set(sw, fc, false, true,
						     fc->reachable);
		break;
	case PRESTERA_FIB_TYPE_INVALID:
		break;
	}

	return 0;
}

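/* A local-table route shadows the same prefix in the main table. The first
 * helper returns the main-table entry that "fc" overlaps; the second returns
 * the local-table entry that overlaps "fc".
 */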
static struct prestera_kern_fib_cache *
__prestera_k_arb_util_fib_overlaps(struct prestera_switch *sw,
				   struct prestera_kern_fib_cache *fc)
{
	struct prestera_kern_fib_cache_key fc_key;
	struct prestera_kern_fib_cache *rfc;

	/* TODO: parse kernel rules */
	rfc = NULL;
	if (fc->key.kern_tb_id == RT_TABLE_LOCAL) {
		memcpy(&fc_key, &fc->key, sizeof(fc_key));
		fc_key.kern_tb_id = RT_TABLE_MAIN;
		rfc = prestera_kern_fib_cache_find(sw, &fc_key);
	}

	return rfc;
}

static struct prestera_kern_fib_cache *
__prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw,
				     struct prestera_kern_fib_cache *fc)
{
	struct prestera_kern_fib_cache_key fc_key;
	struct prestera_kern_fib_cache *rfc;

	/* TODO: parse kernel rules */
	rfc = NULL;
	if (fc->key.kern_tb_id == RT_TABLE_MAIN) {
		memcpy(&fc_key, &fc->key, sizeof(fc_key));
		fc_key.kern_tb_id = RT_TABLE_LOCAL;
		rfc = prestera_kern_fib_cache_find(sw, &fc_key);
	}

	return rfc;
}

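/* Handle a kernel FIB replace/del event: drop any existing cache entry for
 * the prefix (re-enabling an overlapped main-table route if needed) and, on
 * replace, create a new entry and apply it to hardware.
 */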
static int
prestera_k_arb_fib_evt(struct prestera_switch *sw,
		       bool replace, /* replace or del */
		       struct fib_entry_notifier_info *fen_info)
{
	struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */
	struct prestera_kern_fib_cache_key fc_key;
	struct prestera_kern_fib_cache *fib_cache;
	int err;

	prestera_util_fen_info2fib_cache_key(fen_info, &fc_key);
	fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
	if (fib_cache) {
		fib_cache->reachable = false;
		err = __prestera_k_arb_fc_apply(sw, fib_cache);
		if (err)
			dev_err(sw->dev->dev,
				"Applying destroyed fib_cache failed");

		bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
		tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
		if (!tfib_cache && bfib_cache) {
			bfib_cache->reachable = true;
			err = __prestera_k_arb_fc_apply(sw, bfib_cache);
			if (err)
				dev_err(sw->dev->dev,
					"Applying fib_cache btm failed");
		}

		prestera_kern_fib_cache_destroy(sw, fib_cache);
	}

	if (replace) {
		fib_cache = prestera_kern_fib_cache_create(sw, &fc_key,
							   fen_info->fi,
							   fen_info->tos,
							   fen_info->type);
		if (!fib_cache) {
			dev_err(sw->dev->dev, "fib_cache == NULL");
			return -ENOENT;
		}

		bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
		tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
		if (!tfib_cache)
			fib_cache->reachable = true;

		if (bfib_cache) {
			bfib_cache->reachable = false;
			err = __prestera_k_arb_fc_apply(sw, bfib_cache);
			if (err)
				dev_err(sw->dev->dev,
					"Applying fib_cache btm failed");
		}

		err = __prestera_k_arb_fc_apply(sw, fib_cache);
		if (err)
			dev_err(sw->dev->dev, "Applying fib_cache failed");
	}

	return 0;
}

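/* Create or destroy the router interface (RIF) bound to a prestera port on
 * NETDEV_UP/NETDEV_DOWN inetaddr events.
 */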
static int __prestera_inetaddr_port_event(struct net_device *port_dev,
					  unsigned long event,
					  struct netlink_ext_ack *extack)
{
	struct prestera_port *port = netdev_priv(port_dev);
	struct prestera_rif_entry_key re_key = {};
	struct prestera_rif_entry *re;
	u32 kern_tb_id;
	int err;

	err = prestera_is_valid_mac_addr(port, port_dev->dev_addr);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "RIF MAC must have the same prefix");
		return err;
	}

	kern_tb_id = l3mdev_fib_table(port_dev);
	re_key.iface.type = PRESTERA_IF_PORT_E;
	re_key.iface.dev_port.hw_dev_num = port->dev_id;
	re_key.iface.dev_port.port_num = port->hw_id;
	re = prestera_rif_entry_find(port->sw, &re_key);

	switch (event) {
	case NETDEV_UP:
		if (re) {
			NL_SET_ERR_MSG_MOD(extack, "RIF already exists");
			return -EEXIST;
		}
		re = prestera_rif_entry_create(port->sw, &re_key,
					       prestera_fix_tb_id(kern_tb_id),
					       port_dev->dev_addr);
		if (!re) {
			NL_SET_ERR_MSG_MOD(extack, "Can't create RIF");
			return -EINVAL;
		}
		dev_hold(port_dev);
		break;
	case NETDEV_DOWN:
		if (!re) {
			NL_SET_ERR_MSG_MOD(extack, "Can't find RIF");
			return -ENOENT;
		}
		prestera_rif_entry_destroy(port->sw, re);
		dev_put(port_dev);
		break;
	}

	return 0;
}

static int __prestera_inetaddr_event(struct prestera_switch *sw,
				     struct net_device *dev,
				     unsigned long event,
				     struct netlink_ext_ack *extack)
{
	if (!prestera_netdev_check(dev) || netif_is_bridge_port(dev) ||
	    netif_is_lag_port(dev) || netif_is_ovs_port(dev))
		return 0;

	return __prestera_inetaddr_port_event(dev, event, extack);
}

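/* inetaddr notifier: destroys the RIF when the last IPv4 address is removed
 * from the device.
 */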
static int __prestera_inetaddr_cb(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct prestera_router *router = container_of(nb,
						      struct prestera_router,
						      inetaddr_nb);
	struct in_device *idev;
	int err = 0;

	if (event != NETDEV_DOWN)
		goto out;

	/* Ignore if this is not the last address on the device */
	idev = __in_dev_get_rtnl(dev);
	if (idev && idev->ifa_list)
		goto out;

	err = __prestera_inetaddr_event(router->sw, dev, event, NULL);
out:
	return notifier_from_errno(err);
}

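/* inetaddr validator notifier: creates the RIF when the first IPv4 address
 * is added and rejects multicast addresses before the kernel commits them.
 */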
static int __prestera_inetaddr_valid_cb(struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	struct in_validator_info *ivi = (struct in_validator_info *)ptr;
	struct net_device *dev = ivi->ivi_dev->dev;
	struct prestera_router *router = container_of(nb,
						      struct prestera_router,
						      inetaddr_valid_nb);
	struct in_device *idev;
	int err = 0;

	if (event != NETDEV_UP)
		goto out;

	/* Ignore if this is not the first address on the device */
	idev = __in_dev_get_rtnl(dev);
	if (idev && idev->ifa_list)
		goto out;

	if (ipv4_is_multicast(ivi->ivi_addr)) {
		NL_SET_ERR_MSG_MOD(ivi->extack,
				   "Multicast addr on RIF is not supported");
		err = -EINVAL;
		goto out;
	}

	err = __prestera_inetaddr_event(router->sw, dev, event, ivi->extack);
out:
	return notifier_from_errno(err);
}

struct prestera_fib_event_work {
	struct work_struct work;
	struct prestera_switch *sw;
	struct fib_entry_notifier_info fen_info;
	unsigned long event;
};

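/* Deferred FIB event processing: takes the RTNL lock, applies the replace/del
 * to hardware and drops the fib_info reference taken in the notifier.
 */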
static void __prestera_router_fib_event_work(struct work_struct *work)
{
	struct prestera_fib_event_work *fib_work =
			container_of(work, struct prestera_fib_event_work, work);
	struct prestera_switch *sw = fib_work->sw;
	int err;

	rtnl_lock();

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = prestera_k_arb_fib_evt(sw, true, &fib_work->fen_info);
		if (err)
			goto err_out;

		break;
	case FIB_EVENT_ENTRY_DEL:
		err = prestera_k_arb_fib_evt(sw, false, &fib_work->fen_info);
		if (err)
			goto err_out;

		break;
	}

	goto out;

err_out:
	dev_err(sw->dev->dev, "Error when processing %pI4h/%d",
		&fib_work->fen_info.dst,
		fib_work->fen_info.dst_len);
out:
	fib_info_put(fib_work->fen_info.fi);
	rtnl_unlock();
	kfree(fib_work);
}

/* Called with rcu_read_lock() */
static int __prestera_router_fib_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct prestera_fib_event_work *fib_work;
	struct fib_entry_notifier_info *fen_info;
	struct fib_notifier_info *info = ptr;
	struct prestera_router *router;

	if (info->family != AF_INET)
		return NOTIFY_DONE;

	router = container_of(nb, struct prestera_router, fib_nb);

	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_DEL:
		fen_info = container_of(info, struct fib_entry_notifier_info,
					info);
		if (!fen_info->fi)
			return NOTIFY_DONE;

		fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
		if (WARN_ON(!fib_work))
			return NOTIFY_BAD;

		fib_info_hold(fen_info->fi);
		fib_work->fen_info = *fen_info;
		fib_work->event = event;
		fib_work->sw = router->sw;
		INIT_WORK(&fib_work->work, __prestera_router_fib_event_work);
		prestera_queue_work(&fib_work->work);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_DONE;
}

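/* Set up the router block: hardware router objects, the kernel FIB cache and
 * the inetaddr/FIB notifiers that keep hardware in sync with the kernel.
 */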
int prestera_router_init(struct prestera_switch *sw)
{
	struct prestera_router *router;
	int err;

	router = kzalloc(sizeof(*sw->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;

	sw->router = router;
	router->sw = sw;

	err = prestera_router_hw_init(sw);
	if (err)
		goto err_router_lib_init;

	err = rhashtable_init(&router->kern_fib_cache_ht,
			      &__prestera_kern_fib_cache_ht_params);
	if (err)
		goto err_kern_fib_cache_ht_init;

	router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb;
	err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
	if (err)
		goto err_register_inetaddr_validator_notifier;

	router->inetaddr_nb.notifier_call = __prestera_inetaddr_cb;
	err = register_inetaddr_notifier(&router->inetaddr_nb);
	if (err)
		goto err_register_inetaddr_notifier;

	router->fib_nb.notifier_call = __prestera_router_fib_event;
	err = register_fib_notifier(&init_net, &router->fib_nb,
				    /* TODO: flush fib entries */ NULL, NULL);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
err_register_inetaddr_validator_notifier:
	rhashtable_destroy(&router->kern_fib_cache_ht);
err_kern_fib_cache_ht_init:
	prestera_router_hw_fini(sw);
err_router_lib_init:
	kfree(sw->router);
	return err;
}

void prestera_router_fini(struct prestera_switch *sw)
{
	unregister_fib_notifier(&init_net, &sw->router->fib_nb);
	unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
	unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
	rhashtable_destroy(&sw->router->kern_fib_cache_ht);
	prestera_router_hw_fini(sw);
	kfree(sw->router);
	sw->router = NULL;
}