// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/inetdevice.h>
#include <net/inet_dscp.h>
#include <net/switchdev.h>
#include <linux/rhashtable.h>

#include "prestera.h"
#include "prestera_router_hw.h"

struct prestera_kern_fib_cache_key {
	struct prestera_ip_addr addr;
	u32 prefix_len;
	u32 kern_tb_id; /* tb_id from kernel (not fixed) */
};

/* Cached view of a FIB entry learned from the kernel */
struct prestera_kern_fib_cache {
	struct prestera_kern_fib_cache_key key;
	struct {
		struct prestera_fib_key fib_key;
		enum prestera_fib_type fib_type;
	} lpm_info; /* holds prepared LPM info */
	struct rhash_head ht_node; /* node of prestera_router */
	struct fib_info *fi;
	dscp_t kern_dscp;
	u8 kern_type;
	/* Indicates that the route is not overlapped by another table */
	bool reachable;
};

static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = {
	.key_offset  = offsetof(struct prestera_kern_fib_cache, key),
	.head_offset = offsetof(struct prestera_kern_fib_cache, ht_node),
	.key_len     = sizeof(struct prestera_kern_fib_cache_key),
	.automatic_shrinking = true,
};

/* Map kernel table ids that share the hardware default VR onto RT_TABLE_MAIN */
static u32 prestera_fix_tb_id(u32 tb_id)
{
	if (tb_id == RT_TABLE_UNSPEC ||
	    tb_id == RT_TABLE_LOCAL ||
	    tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;

	return tb_id;
}

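/* Build a cache key from a kernel FIB entry notification. The table id is
 * stored as reported by the kernel; it is normalized via prestera_fix_tb_id()
 * only when the LPM key is prepared.
 */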
static void
prestera_util_fen_info2fib_cache_key(struct fib_entry_notifier_info *fen_info,
				     struct prestera_kern_fib_cache_key *key)
{
	memset(key, 0, sizeof(*key));
	key->addr.u.ipv4 = cpu_to_be32(fen_info->dst);
	key->prefix_len = fen_info->dst_len;
	key->kern_tb_id = fen_info->tb_id;
}

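/* Look up a cached kernel FIB entry by key; returns NULL if it is not cached. */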
static struct prestera_kern_fib_cache *
prestera_kern_fib_cache_find(struct prestera_switch *sw,
			     struct prestera_kern_fib_cache_key *key)
{
	struct prestera_kern_fib_cache *fib_cache;

	fib_cache = rhashtable_lookup_fast(&sw->router->kern_fib_cache_ht, key,
					   __prestera_kern_fib_cache_ht_params);
	return fib_cache;
}

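/* Drop the fib_info reference taken at creation time, unlink the cache entry
 * from the hash table and free it.
 */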
static void
prestera_kern_fib_cache_destroy(struct prestera_switch *sw,
				struct prestera_kern_fib_cache *fib_cache)
{
	fib_info_put(fib_cache->fi);
	rhashtable_remove_fast(&sw->router->kern_fib_cache_ht,
			       &fib_cache->ht_node,
			       __prestera_kern_fib_cache_ht_params);
	kfree(fib_cache);
}

/* Operations on fi (offload, etc) must be wrapped in utils.
 * This function just creates storage.
 */
static struct prestera_kern_fib_cache *
prestera_kern_fib_cache_create(struct prestera_switch *sw,
			       struct prestera_kern_fib_cache_key *key,
			       struct fib_info *fi, dscp_t dscp, u8 type)
{
	struct prestera_kern_fib_cache *fib_cache;
	int err;

	fib_cache = kzalloc(sizeof(*fib_cache), GFP_KERNEL);
	if (!fib_cache)
		goto err_kzalloc;

	memcpy(&fib_cache->key, key, sizeof(*key));
	fib_info_hold(fi);
	fib_cache->fi = fi;
	fib_cache->kern_dscp = dscp;
	fib_cache->kern_type = type;

	err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht,
				     &fib_cache->ht_node,
				     __prestera_kern_fib_cache_ht_params);
	if (err)
		goto err_ht_insert;

	return fib_cache;

err_ht_insert:
	fib_info_put(fi);
	kfree(fib_cache);
err_kzalloc:
	return NULL;
}

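/* Report the hardware offload state of a route back to the kernel via
 * fib_alias_hw_flags_set(). Only IPv4 entries are handled here.
 */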
static void
__prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw,
				     struct prestera_kern_fib_cache *fc,
				     bool fail, bool offload, bool trap)
{
	struct fib_rt_info fri;

	if (fc->key.addr.v != PRESTERA_IPV4)
		return;

	fri.fi = fc->fi;
	fri.tb_id = fc->key.kern_tb_id;
	fri.dst = fc->key.addr.u.ipv4;
	fri.dst_len = fc->key.prefix_len;
	fri.dscp = fc->kern_dscp;
	fri.type = fc->kern_type;
	/* flags begin */
	fri.offload = offload;
	fri.trap = trap;
	fri.offload_failed = fail;
	/* flags end */
	fib_alias_hw_flags_set(&init_net, &fri);
}

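/* Translate the kernel route type into an LPM action (trap or drop) and fill
 * fc->lpm_info with the key used to program the hardware.
 */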
static int
__prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw,
				     struct prestera_kern_fib_cache *fc)
{
	memset(&fc->lpm_info, 0, sizeof(fc->lpm_info));

	switch (fc->fi->fib_type) {
	case RTN_UNICAST:
		fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
		break;
	/* Unsupported. Leave it for kernel: */
	case RTN_BROADCAST:
	case RTN_MULTICAST:
	/* Routes we must trap by design: */
	case RTN_LOCAL:
	case RTN_UNREACHABLE:
	case RTN_PROHIBIT:
		fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
		break;
	case RTN_BLACKHOLE:
		fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_DROP;
		break;
	default:
		dev_err(sw->dev->dev, "Unsupported fib_type");
		return -EOPNOTSUPP;
	}

	fc->lpm_info.fib_key.addr = fc->key.addr;
	fc->lpm_info.fib_key.prefix_len = fc->key.prefix_len;
	fc->lpm_info.fib_key.tb_id = prestera_fix_tb_id(fc->key.kern_tb_id);

	return 0;
}

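/* Remove any existing LPM entry for this key and, if @enabled, re-create it
 * with the prepared lpm_info.
 */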
static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw,
				      struct prestera_kern_fib_cache *fc,
				      bool enabled)
{
	struct prestera_fib_node *fib_node;

	fib_node = prestera_fib_node_find(sw, &fc->lpm_info.fib_key);
	if (fib_node)
		prestera_fib_node_destroy(sw, fib_node);

	if (!enabled)
		return 0;

	fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key,
					    fc->lpm_info.fib_type);
	if (!fib_node) {
		dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d",
			&fc->key.addr.u.ipv4, fc->key.prefix_len,
			fc->key.kern_tb_id);
		return -ENOENT;
	}

	return 0;
}

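/* Push the cached entry state to hardware: recalculate lpm_info, program the
 * LPM table according to fc->reachable and reflect the result in the kernel
 * offload/trap flags.
 */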
static int __prestera_k_arb_fc_apply(struct prestera_switch *sw,
				     struct prestera_kern_fib_cache *fc)
{
	int err;

	err = __prestera_pr_k_arb_fc_lpm_info_calc(sw, fc);
	if (err)
		return err;

	err = __prestera_k_arb_f_lpm_set(sw, fc, fc->reachable);
	if (err) {
		__prestera_k_arb_fib_lpm_offload_set(sw, fc,
						     true, false, false);
		return err;
	}

	switch (fc->lpm_info.fib_type) {
	case PRESTERA_FIB_TYPE_TRAP:
		__prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
						     false, fc->reachable);
		break;
	case PRESTERA_FIB_TYPE_DROP:
		__prestera_k_arb_fib_lpm_offload_set(sw, fc, false, true,
						     fc->reachable);
		break;
	case PRESTERA_FIB_TYPE_INVALID:
		break;
	}

	return 0;
}

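/* Return the MAIN-table route shadowed by @fc when @fc lives in the LOCAL
 * table (the "bottom" entry in the arbitration).
 */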
static struct prestera_kern_fib_cache *
__prestera_k_arb_util_fib_overlaps(struct prestera_switch *sw,
				   struct prestera_kern_fib_cache *fc)
{
	struct prestera_kern_fib_cache_key fc_key;
	struct prestera_kern_fib_cache *rfc;

	/* TODO: parse kernel rules */
	rfc = NULL;
	if (fc->key.kern_tb_id == RT_TABLE_LOCAL) {
		memcpy(&fc_key, &fc->key, sizeof(fc_key));
		fc_key.kern_tb_id = RT_TABLE_MAIN;
		rfc = prestera_kern_fib_cache_find(sw, &fc_key);
	}

	return rfc;
}

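/* Return the LOCAL-table route that shadows @fc when @fc lives in the MAIN
 * table (the "top" entry in the arbitration).
 */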
static struct prestera_kern_fib_cache *
__prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw,
				     struct prestera_kern_fib_cache *fc)
{
	struct prestera_kern_fib_cache_key fc_key;
	struct prestera_kern_fib_cache *rfc;

	/* TODO: parse kernel rules */
	rfc = NULL;
	if (fc->key.kern_tb_id == RT_TABLE_MAIN) {
		memcpy(&fc_key, &fc->key, sizeof(fc_key));
		fc_key.kern_tb_id = RT_TABLE_LOCAL;
		rfc = prestera_kern_fib_cache_find(sw, &fc_key);
	}

	return rfc;
}

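/* Handle a FIB entry replace/delete event. The old cache entry (if any) is
 * withdrawn from hardware first; reachability is then re-arbitrated between
 * overlapping LOCAL and MAIN table entries before the new entry is applied.
 */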
static int
prestera_k_arb_fib_evt(struct prestera_switch *sw,
		       bool replace, /* replace or del */
		       struct fib_entry_notifier_info *fen_info)
{
	struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */
	struct prestera_kern_fib_cache_key fc_key;
	struct prestera_kern_fib_cache *fib_cache;
	int err;

	prestera_util_fen_info2fib_cache_key(fen_info, &fc_key);
	fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
	if (fib_cache) {
		fib_cache->reachable = false;
		err = __prestera_k_arb_fc_apply(sw, fib_cache);
		if (err)
			dev_err(sw->dev->dev,
				"Applying destroyed fib_cache failed");

		bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
		tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
		if (!tfib_cache && bfib_cache) {
			bfib_cache->reachable = true;
			err = __prestera_k_arb_fc_apply(sw, bfib_cache);
			if (err)
				dev_err(sw->dev->dev,
					"Applying fib_cache btm failed");
		}

		prestera_kern_fib_cache_destroy(sw, fib_cache);
	}

	if (replace) {
		fib_cache = prestera_kern_fib_cache_create(sw, &fc_key,
							   fen_info->fi,
							   fen_info->dscp,
							   fen_info->type);
		if (!fib_cache) {
			dev_err(sw->dev->dev, "fib_cache == NULL");
			return -ENOENT;
		}

		bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
		tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
		if (!tfib_cache)
			fib_cache->reachable = true;

		if (bfib_cache) {
			bfib_cache->reachable = false;
			err = __prestera_k_arb_fc_apply(sw, bfib_cache);
			if (err)
				dev_err(sw->dev->dev,
					"Applying fib_cache btm failed");
		}

		err = __prestera_k_arb_fc_apply(sw, fib_cache);
		if (err)
			dev_err(sw->dev->dev, "Applying fib_cache failed");
	}

	return 0;
}

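/* Create or destroy the router interface (RIF) backing a port. Called from
 * the inetaddr notifiers on NETDEV_UP/NETDEV_DOWN of the first/last IPv4
 * address on the device.
 */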
static int __prestera_inetaddr_port_event(struct net_device *port_dev,
					  unsigned long event,
					  struct netlink_ext_ack *extack)
{
	struct prestera_port *port = netdev_priv(port_dev);
	struct prestera_rif_entry_key re_key = {};
	struct prestera_rif_entry *re;
	u32 kern_tb_id;
	int err;

	err = prestera_is_valid_mac_addr(port, port_dev->dev_addr);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "RIF MAC must have the same prefix");
		return err;
	}

	kern_tb_id = l3mdev_fib_table(port_dev);
	re_key.iface.type = PRESTERA_IF_PORT_E;
	re_key.iface.dev_port.hw_dev_num = port->dev_id;
	re_key.iface.dev_port.port_num = port->hw_id;
	re = prestera_rif_entry_find(port->sw, &re_key);

	switch (event) {
	case NETDEV_UP:
		if (re) {
			NL_SET_ERR_MSG_MOD(extack, "RIF already exists");
			return -EEXIST;
		}
		re = prestera_rif_entry_create(port->sw, &re_key,
					       prestera_fix_tb_id(kern_tb_id),
					       port_dev->dev_addr);
		if (!re) {
			NL_SET_ERR_MSG_MOD(extack, "Can't create RIF");
			return -EINVAL;
		}
		dev_hold(port_dev);
		break;
	case NETDEV_DOWN:
		if (!re) {
			NL_SET_ERR_MSG_MOD(extack, "Can't find RIF");
			return -ENOENT;
		}
		prestera_rif_entry_destroy(port->sw, re);
		dev_put(port_dev);
		break;
	}

	return 0;
}

static int __prestera_inetaddr_event(struct prestera_switch *sw,
				     struct net_device *dev,
				     unsigned long event,
				     struct netlink_ext_ack *extack)
{
	if (!prestera_netdev_check(dev) || netif_is_bridge_port(dev) ||
	    netif_is_lag_port(dev) || netif_is_ovs_port(dev))
		return 0;

	return __prestera_inetaddr_port_event(dev, event, extack);
}

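/* inetaddr notifier callback: the RIF is torn down only when the last IPv4
 * address is removed from the device.
 */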
static int __prestera_inetaddr_cb(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct prestera_router *router = container_of(nb,
						      struct prestera_router,
						      inetaddr_nb);
	struct in_device *idev;
	int err = 0;

	if (event != NETDEV_DOWN)
		goto out;

	/* Ignore if this is not the last address */
	idev = __in_dev_get_rtnl(dev);
	if (idev && idev->ifa_list)
		goto out;

	err = __prestera_inetaddr_event(router->sw, dev, event, NULL);
out:
	return notifier_from_errno(err);
}

static int __prestera_inetaddr_valid_cb(struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	struct in_validator_info *ivi = (struct in_validator_info *)ptr;
	struct net_device *dev = ivi->ivi_dev->dev;
	struct prestera_router *router = container_of(nb,
						      struct prestera_router,
						      inetaddr_valid_nb);
	struct in_device *idev;
	int err = 0;

	if (event != NETDEV_UP)
		goto out;

	/* Ignore if this is not the first address */
	idev = __in_dev_get_rtnl(dev);
	if (idev && idev->ifa_list)
		goto out;

	if (ipv4_is_multicast(ivi->ivi_addr)) {
		NL_SET_ERR_MSG_MOD(ivi->extack,
				   "Multicast addr on RIF is not supported");
		err = -EINVAL;
		goto out;
	}

	err = __prestera_inetaddr_event(router->sw, dev, event, ivi->extack);
out:
	return notifier_from_errno(err);
}

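/* FIB notifications arrive in atomic context, so the actual processing is
 * deferred to process context via this work item.
 */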
struct prestera_fib_event_work {
	struct work_struct work;
	struct prestera_switch *sw;
	struct fib_entry_notifier_info fen_info;
	unsigned long event;
};

static void __prestera_router_fib_event_work(struct work_struct *work)
{
	struct prestera_fib_event_work *fib_work =
			container_of(work, struct prestera_fib_event_work, work);
	struct prestera_switch *sw = fib_work->sw;
	int err;

	rtnl_lock();

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = prestera_k_arb_fib_evt(sw, true, &fib_work->fen_info);
		if (err)
			goto err_out;

		break;
	case FIB_EVENT_ENTRY_DEL:
		err = prestera_k_arb_fib_evt(sw, false, &fib_work->fen_info);
		if (err)
			goto err_out;

		break;
	}

	goto out;

err_out:
	dev_err(sw->dev->dev, "Error when processing %pI4h/%d",
		&fib_work->fen_info.dst,
		fib_work->fen_info.dst_len);
out:
	fib_info_put(fib_work->fen_info.fi);
	rtnl_unlock();
	kfree(fib_work);
}

/* Called with rcu_read_lock() */
static int __prestera_router_fib_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct prestera_fib_event_work *fib_work;
	struct fib_entry_notifier_info *fen_info;
	struct fib_notifier_info *info = ptr;
	struct prestera_router *router;

	if (info->family != AF_INET)
		return NOTIFY_DONE;

	router = container_of(nb, struct prestera_router, fib_nb);

	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_DEL:
		fen_info = container_of(info, struct fib_entry_notifier_info,
					info);
		if (!fen_info->fi)
			return NOTIFY_DONE;

		fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
		if (WARN_ON(!fib_work))
			return NOTIFY_BAD;

		fib_info_hold(fen_info->fi);
		fib_work->fen_info = *fen_info;
		fib_work->event = event;
		fib_work->sw = router->sw;
		INIT_WORK(&fib_work->work, __prestera_router_fib_event_work);
		prestera_queue_work(&fib_work->work);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_DONE;
}

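/* Initialize routing support: the HW router library, the kernel FIB cache and
 * the inetaddr/FIB notifiers that keep hardware state in sync with the kernel.
 */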
int prestera_router_init(struct prestera_switch *sw)
{
	struct prestera_router *router;
	int err;

	router = kzalloc(sizeof(*sw->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;

	sw->router = router;
	router->sw = sw;

	err = prestera_router_hw_init(sw);
	if (err)
		goto err_router_lib_init;

	err = rhashtable_init(&router->kern_fib_cache_ht,
			      &__prestera_kern_fib_cache_ht_params);
	if (err)
		goto err_kern_fib_cache_ht_init;

	router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb;
	err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
	if (err)
		goto err_register_inetaddr_validator_notifier;

	router->inetaddr_nb.notifier_call = __prestera_inetaddr_cb;
	err = register_inetaddr_notifier(&router->inetaddr_nb);
	if (err)
		goto err_register_inetaddr_notifier;

	router->fib_nb.notifier_call = __prestera_router_fib_event;
	err = register_fib_notifier(&init_net, &router->fib_nb,
				    /* TODO: flush fib entries */ NULL, NULL);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
	unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
err_register_inetaddr_validator_notifier:
	rhashtable_destroy(&router->kern_fib_cache_ht);
err_kern_fib_cache_ht_init:
	prestera_router_hw_fini(sw);
err_router_lib_init:
	kfree(sw->router);
	return err;
}

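/* Tear down routing support in the reverse order of prestera_router_init() */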
void prestera_router_fini(struct prestera_switch *sw)
{
	unregister_fib_notifier(&init_net, &sw->router->fib_nb);
	unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
	unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
	rhashtable_destroy(&sw->router->kern_fib_cache_ht);
	prestera_router_hw_fini(sw);
	kfree(sw->router);
	sw->router = NULL;
}