/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/rhashtable.h>

#include "spectrum_mr.h"
#include "spectrum_router.h"

struct mlxsw_sp_mr {
        const struct mlxsw_sp_mr_ops *mr_ops;
        void *catchall_route_priv;
        struct delayed_work stats_update_dw;
        struct list_head table_list;
#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
        unsigned long priv[0];
        /* priv has to always be the last item */
};

struct mlxsw_sp_mr_vif {
        struct net_device *dev;
        const struct mlxsw_sp_rif *rif;
        unsigned long vif_flags;

        /* A list of route_vif_entry structs that point to routes in which
         * the VIF instance is used as one of the egress VIFs
         */
        struct list_head route_evif_list;

        /* A list of route_vif_entry structs that point to routes in which
         * the VIF instance is used as the ingress VIF
         */
        struct list_head route_ivif_list;
};

struct mlxsw_sp_mr_route_vif_entry {
        struct list_head vif_node;
        struct list_head route_node;
        struct mlxsw_sp_mr_vif *mr_vif;
        struct mlxsw_sp_mr_route *mr_route;
};

struct mlxsw_sp_mr_table {
        struct list_head node;
        enum mlxsw_sp_l3proto proto;
        struct mlxsw_sp *mlxsw_sp;
        u32 vr_id;
        struct mlxsw_sp_mr_vif vifs[MAXVIFS];
        struct list_head route_list;
        struct rhashtable route_ht;
        char catchall_route_priv[0];
        /* catchall_route_priv has to always be the last item */
};

struct mlxsw_sp_mr_route {
        struct list_head node;
        struct rhash_head ht_node;
        struct mlxsw_sp_mr_route_key key;
        enum mlxsw_sp_mr_route_action route_action;
        u16 min_mtu;
        struct mfc_cache *mfc4;
        void *route_priv;
        const struct mlxsw_sp_mr_table *mr_table;
        /* A list of route_vif_entry structs that point to the egress VIFs */
        struct list_head evif_list;
        /* A route_vif_entry struct that points to the ingress VIF */
        struct mlxsw_sp_mr_route_vif_entry ivif;
};

static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
        .key_len = sizeof(struct mlxsw_sp_mr_route_key),
        .key_offset = offsetof(struct mlxsw_sp_mr_route, key),
        .head_offset = offsetof(struct mlxsw_sp_mr_route, ht_node),
        .automatic_shrinking = true,
};

static bool mlxsw_sp_mr_vif_regular(const struct mlxsw_sp_mr_vif *vif)
{
        return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
}

static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
{
        return mlxsw_sp_mr_vif_regular(vif) && vif->dev && vif->rif;
}

static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
{
        return vif->dev;
}
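
/* Check whether the route's ingress VIF also appears in its egress set. The
 * kernel encodes the egress VIFs of an MFC cache entry as TTL thresholds,
 * where a value of 255 means the VIF is not an egress VIF for this route.
 */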
static bool
mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
{
        vifi_t ivif;

        switch (mr_route->mr_table->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                ivif = mr_route->mfc4->mfc_parent;
                return mr_route->mfc4->mfc_un.res.ttls[ivif] != 255;
        case MLXSW_SP_L3_PROTO_IPV6:
                /* fall through */
        default:
                WARN_ON_ONCE(1);
        }
        return false;
}

static int
mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
{
        struct mlxsw_sp_mr_route_vif_entry *rve;
        int valid_evifs;

        valid_evifs = 0;
        list_for_each_entry(rve, &mr_route->evif_list, route_node)
                if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
                        valid_evifs++;
        return valid_evifs;
}

static bool mlxsw_sp_mr_route_starg(const struct mlxsw_sp_mr_route *mr_route)
{
        switch (mr_route->mr_table->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                return mr_route->key.source_mask.addr4 == INADDR_ANY;
        case MLXSW_SP_L3_PROTO_IPV6:
                /* fall through */
        default:
                WARN_ON_ONCE(1);
        }
        return false;
}

static enum mlxsw_sp_mr_route_action
mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
{
        struct mlxsw_sp_mr_route_vif_entry *rve;

        /* If the ingress port is not regular and resolved, trap the route */
        if (!mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
                return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

        /* The kernel does not match a (*,G) route in which the ingress
         * interface is not one of the egress interfaces, so trap such routes.
         */
        if (mlxsw_sp_mr_route_starg(mr_route) &&
            !mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
                return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

        /* If the route has no valid eVIFs, trap it. */
        if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route))
                return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

        /* If one of the eVIFs has no RIF, trap-and-forward the route as there
         * is some more routing to do in software too.
         */
        list_for_each_entry(rve, &mr_route->evif_list, route_node)
                if (mlxsw_sp_mr_vif_exists(rve->mr_vif) && !rve->mr_vif->rif)
                        return MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD;

        return MLXSW_SP_MR_ROUTE_ACTION_FORWARD;
}

static enum mlxsw_sp_mr_route_prio
mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
{
        return mlxsw_sp_mr_route_starg(mr_route) ?
               MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
}

static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
                                   struct mlxsw_sp_mr_route_key *key,
                                   const struct mfc_cache *mfc)
{
        bool starg = (mfc->mfc_origin == INADDR_ANY);

        memset(key, 0, sizeof(*key));
        key->vrid = mr_table->vr_id;
        key->proto = mr_table->proto;
        key->group.addr4 = mfc->mfc_mcastgrp;
        key->group_mask.addr4 = 0xffffffff;
        key->source.addr4 = mfc->mfc_origin;
        key->source_mask.addr4 = starg ? 0 : 0xffffffff;
}
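
/* Link a route to a VIF as one of its egress VIFs. The entry is threaded on
 * both the route's evif_list and the VIF's route_evif_list, so each object
 * can walk its peers when its state changes.
 */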
static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
                                       struct mlxsw_sp_mr_vif *mr_vif)
{
        struct mlxsw_sp_mr_route_vif_entry *rve;

        rve = kzalloc(sizeof(*rve), GFP_KERNEL);
        if (!rve)
                return -ENOMEM;
        rve->mr_route = mr_route;
        rve->mr_vif = mr_vif;
        list_add_tail(&rve->route_node, &mr_route->evif_list);
        list_add_tail(&rve->vif_node, &mr_vif->route_evif_list);
        return 0;
}

static void
mlxsw_sp_mr_route_evif_unlink(struct mlxsw_sp_mr_route_vif_entry *rve)
{
        list_del(&rve->route_node);
        list_del(&rve->vif_node);
        kfree(rve);
}

static void mlxsw_sp_mr_route_ivif_link(struct mlxsw_sp_mr_route *mr_route,
                                        struct mlxsw_sp_mr_vif *mr_vif)
{
        mr_route->ivif.mr_route = mr_route;
        mr_route->ivif.mr_vif = mr_vif;
        list_add_tail(&mr_route->ivif.vif_node, &mr_vif->route_ivif_list);
}

static void mlxsw_sp_mr_route_ivif_unlink(struct mlxsw_sp_mr_route *mr_route)
{
        list_del(&mr_route->ivif.vif_node);
}

static int
mlxsw_sp_mr_route_info_create(struct mlxsw_sp_mr_table *mr_table,
                              struct mlxsw_sp_mr_route *mr_route,
                              struct mlxsw_sp_mr_route_info *route_info)
{
        struct mlxsw_sp_mr_route_vif_entry *rve;
        u16 *erif_indices;
        u16 irif_index;
        u16 erif = 0;

        erif_indices = kmalloc_array(MAXVIFS, sizeof(*erif_indices),
                                     GFP_KERNEL);
        if (!erif_indices)
                return -ENOMEM;

        list_for_each_entry(rve, &mr_route->evif_list, route_node) {
                if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
                        u16 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);

                        erif_indices[erif++] = rifi;
                }
        }

        if (mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
                irif_index = mlxsw_sp_rif_index(mr_route->ivif.mr_vif->rif);
        else
                irif_index = 0;

        route_info->irif_index = irif_index;
        route_info->erif_indices = erif_indices;
        route_info->min_mtu = mr_route->min_mtu;
        route_info->route_action = mr_route->route_action;
        route_info->erif_num = erif;
        return 0;
}

static void
mlxsw_sp_mr_route_info_destroy(struct mlxsw_sp_mr_route_info *route_info)
{
        kfree(route_info->erif_indices);
}
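
/* Write a route to the hardware. When replace is false, a new route_priv is
 * allocated and the ops' route_create() is called; otherwise the existing
 * route_priv is kept and only route_update() is issued.
 */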
static int mlxsw_sp_mr_route_write(struct mlxsw_sp_mr_table *mr_table,
                                   struct mlxsw_sp_mr_route *mr_route,
                                   bool replace)
{
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        struct mlxsw_sp_mr_route_info route_info;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
        int err;

        err = mlxsw_sp_mr_route_info_create(mr_table, mr_route, &route_info);
        if (err)
                return err;

        if (!replace) {
                struct mlxsw_sp_mr_route_params route_params;

                mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size,
                                               GFP_KERNEL);
                if (!mr_route->route_priv) {
                        err = -ENOMEM;
                        goto out;
                }

                route_params.key = mr_route->key;
                route_params.value = route_info;
                route_params.prio = mlxsw_sp_mr_route_prio(mr_route);
                err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
                                               mr_route->route_priv,
                                               &route_params);
                if (err)
                        kfree(mr_route->route_priv);
        } else {
                err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv,
                                               &route_info);
        }
out:
        mlxsw_sp_mr_route_info_destroy(&route_info);
        return err;
}

static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
                                    struct mlxsw_sp_mr_route *mr_route)
{
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

        mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv);
        kfree(mr_route->route_priv);
}

static struct mlxsw_sp_mr_route *
mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
                          struct mfc_cache *mfc)
{
        struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
        struct mlxsw_sp_mr_route *mr_route;
        int err = 0;
        int i;

        /* Allocate and init a new route and fill it with parameters */
        mr_route = kzalloc(sizeof(*mr_route), GFP_KERNEL);
        if (!mr_route)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&mr_route->evif_list);
        mlxsw_sp_mr_route4_key(mr_table, &mr_route->key, mfc);

        /* Find min_mtu and link iVIF and eVIFs */
        mr_route->min_mtu = ETH_MAX_MTU;
        ipmr_cache_hold(mfc);
        mr_route->mfc4 = mfc;
        mr_route->mr_table = mr_table;
        for (i = 0; i < MAXVIFS; i++) {
                if (mfc->mfc_un.res.ttls[i] != 255) {
                        err = mlxsw_sp_mr_route_evif_link(mr_route,
                                                          &mr_table->vifs[i]);
                        if (err)
                                goto err;
                        if (mr_table->vifs[i].dev &&
                            mr_table->vifs[i].dev->mtu < mr_route->min_mtu)
                                mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
                }
        }
        mlxsw_sp_mr_route_ivif_link(mr_route, &mr_table->vifs[mfc->mfc_parent]);

        mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
        return mr_route;
err:
        ipmr_cache_put(mfc);
        list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
                mlxsw_sp_mr_route_evif_unlink(rve);
        kfree(mr_route);
        return ERR_PTR(err);
}

static void mlxsw_sp_mr_route4_destroy(struct mlxsw_sp_mr_table *mr_table,
                                       struct mlxsw_sp_mr_route *mr_route)
{
        struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;

        mlxsw_sp_mr_route_ivif_unlink(mr_route);
        ipmr_cache_put(mr_route->mfc4);
        list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
                mlxsw_sp_mr_route_evif_unlink(rve);
        kfree(mr_route);
}

static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
                                      struct mlxsw_sp_mr_route *mr_route)
{
        switch (mr_table->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
                break;
        case MLXSW_SP_L3_PROTO_IPV6:
                /* fall through */
        default:
                WARN_ON_ONCE(1);
        }
}

static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
                                        bool offload)
{
        switch (mr_route->mr_table->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                if (offload)
                        mr_route->mfc4->mfc_flags |= MFC_OFFLOAD;
                else
                        mr_route->mfc4->mfc_flags &= ~MFC_OFFLOAD;
                break;
        case MLXSW_SP_L3_PROTO_IPV6:
                /* fall through */
        default:
                WARN_ON_ONCE(1);
        }
}

static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
{
        bool offload;

        offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP;
        mlxsw_sp_mr_mfc_offload_set(mr_route, offload);
}

static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
                                    struct mlxsw_sp_mr_route *mr_route)
{
        mlxsw_sp_mr_mfc_offload_set(mr_route, false);
        mlxsw_sp_mr_route_erase(mr_table, mr_route);
        rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
                               mlxsw_sp_mr_route_ht_params);
        list_del(&mr_route->node);
        mlxsw_sp_mr_route_destroy(mr_table, mr_route);
}
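
/* Add an IPv4 multicast route. In the replace case, the new route takes over
 * the hardware state (route_priv) of the route it replaces and only updates
 * it; otherwise a duplicate key is rejected, as duplicate routes are only
 * used by the kernel for proxy routes, which are not offloaded.
 */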
int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
                           struct mfc_cache *mfc, bool replace)
{
        struct mlxsw_sp_mr_route *mr_orig_route = NULL;
        struct mlxsw_sp_mr_route *mr_route;
        int err;

        /* If the route is a (*,*) route, abort, as this kind of route is
         * used for proxy routes.
         */
        if (mfc->mfc_origin == INADDR_ANY && mfc->mfc_mcastgrp == INADDR_ANY) {
                dev_warn(mr_table->mlxsw_sp->bus_info->dev,
                         "Offloading proxy routes is not supported.\n");
                return -EINVAL;
        }

        /* Create a new route */
        mr_route = mlxsw_sp_mr_route4_create(mr_table, mfc);
        if (IS_ERR(mr_route))
                return PTR_ERR(mr_route);

        /* Find any route with a matching key */
        mr_orig_route = rhashtable_lookup_fast(&mr_table->route_ht,
                                               &mr_route->key,
                                               mlxsw_sp_mr_route_ht_params);
        if (replace) {
                /* In the replace case, make the new route point to the
                 * existing route_priv, so the hardware entry is updated in
                 * place.
                 */
                if (WARN_ON(!mr_orig_route)) {
                        err = -ENOENT;
                        goto err_no_orig_route;
                }
                mr_route->route_priv = mr_orig_route->route_priv;
        } else if (mr_orig_route) {
                /* In the non-replace case, if another route with the same key
                 * was found, abort, as duplicate routes are used for proxy
                 * routes.
                 */
                dev_warn(mr_table->mlxsw_sp->bus_info->dev,
                         "Offloading proxy routes is not supported.\n");
                err = -EINVAL;
                goto err_duplicate_route;
        }

        /* Put it in the table data-structures */
        list_add_tail(&mr_route->node, &mr_table->route_list);
        err = rhashtable_insert_fast(&mr_table->route_ht,
                                     &mr_route->ht_node,
                                     mlxsw_sp_mr_route_ht_params);
        if (err)
                goto err_rhashtable_insert;

        /* Write the route to the hardware */
        err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
        if (err)
                goto err_mr_route_write;

        /* Destroy the original route */
        if (replace) {
                rhashtable_remove_fast(&mr_table->route_ht,
                                       &mr_orig_route->ht_node,
                                       mlxsw_sp_mr_route_ht_params);
                list_del(&mr_orig_route->node);
                mlxsw_sp_mr_route4_destroy(mr_table, mr_orig_route);
        }

        mlxsw_sp_mr_mfc_offload_update(mr_route);
        return 0;

err_mr_route_write:
        rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
                               mlxsw_sp_mr_route_ht_params);
err_rhashtable_insert:
        list_del(&mr_route->node);
err_no_orig_route:
err_duplicate_route:
        mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
        return err;
}

void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
                            struct mfc_cache *mfc)
{
        struct mlxsw_sp_mr_route *mr_route;
        struct mlxsw_sp_mr_route_key key;

        mlxsw_sp_mr_route4_key(mr_table, &key, mfc);
        mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
                                          mlxsw_sp_mr_route_ht_params);
        if (mr_route)
                __mlxsw_sp_mr_route_del(mr_table, mr_route);
}
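
/* VIFs and RIFs come and go independently of routes: the helpers below
 * refresh the hardware state of every affected route when a VIF becomes
 * usable or unusable, falling back to trapping packets to the CPU whenever
 * forwarding in hardware is no longer possible.
 */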
/* Should be called after the VIF struct is updated */
static int
mlxsw_sp_mr_route_ivif_resolve(struct mlxsw_sp_mr_table *mr_table,
                               struct mlxsw_sp_mr_route_vif_entry *rve)
{
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        enum mlxsw_sp_mr_route_action route_action;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
        u16 irif_index;
        int err;

        route_action = mlxsw_sp_mr_route_action(rve->mr_route);
        if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
                return 0;

        /* rve->mr_vif->rif is guaranteed to be valid at this stage */
        irif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
        err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv,
                                            irif_index);
        if (err)
                return err;

        err = mr->mr_ops->route_action_update(mlxsw_sp,
                                              rve->mr_route->route_priv,
                                              route_action);
        if (err)
                /* No need to rollback here because the iRIF change only takes
                 * place after the action has been updated.
                 */
                return err;

        rve->mr_route->route_action = route_action;
        mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
        return 0;
}

static void
mlxsw_sp_mr_route_ivif_unresolve(struct mlxsw_sp_mr_table *mr_table,
                                 struct mlxsw_sp_mr_route_vif_entry *rve)
{
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

        mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv,
                                        MLXSW_SP_MR_ROUTE_ACTION_TRAP);
        rve->mr_route->route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
        mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}

/* Should be called after the RIF struct is updated */
static int
mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
                               struct mlxsw_sp_mr_route_vif_entry *rve)
{
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        enum mlxsw_sp_mr_route_action route_action;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
        u16 erif_index = 0;
        int err;

        /* Update the route action, as the new eVIF can be a tunnel or a
         * pimreg device, which will require updating the action.
         */
        route_action = mlxsw_sp_mr_route_action(rve->mr_route);
        if (route_action != rve->mr_route->route_action) {
                err = mr->mr_ops->route_action_update(mlxsw_sp,
                                                      rve->mr_route->route_priv,
                                                      route_action);
                if (err)
                        return err;
        }

        /* Add the eRIF */
        if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
                erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
                err = mr->mr_ops->route_erif_add(mlxsw_sp,
                                                 rve->mr_route->route_priv,
                                                 erif_index);
                if (err)
                        goto err_route_erif_add;
        }

        /* Update the minimum MTU */
        if (rve->mr_vif->dev->mtu < rve->mr_route->min_mtu) {
                rve->mr_route->min_mtu = rve->mr_vif->dev->mtu;
                err = mr->mr_ops->route_min_mtu_update(mlxsw_sp,
                                                       rve->mr_route->route_priv,
                                                       rve->mr_route->min_mtu);
                if (err)
                        goto err_route_min_mtu_update;
        }

        rve->mr_route->route_action = route_action;
        mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
        return 0;

err_route_min_mtu_update:
        if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
                mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
                                           erif_index);
err_route_erif_add:
        if (route_action != rve->mr_route->route_action)
                mr->mr_ops->route_action_update(mlxsw_sp,
                                                rve->mr_route->route_priv,
                                                rve->mr_route->route_action);
        return err;
}

/* Should be called before the RIF struct is updated */
static void
mlxsw_sp_mr_route_evif_unresolve(struct mlxsw_sp_mr_table *mr_table,
                                 struct mlxsw_sp_mr_route_vif_entry *rve)
{
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        enum mlxsw_sp_mr_route_action route_action;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
        u16 rifi;

        /* If the unresolved RIF was not valid, no need to delete it */
        if (!mlxsw_sp_mr_vif_valid(rve->mr_vif))
                return;

        /* Update the route action: if there is only one valid eVIF in the
         * route, set the action to trap as the VIF deletion will lead to zero
         * valid eVIFs. In any other case, use mlxsw_sp_mr_route_action() to
         * determine the route action.
         */
        if (mlxsw_sp_mr_route_valid_evifs_num(rve->mr_route) == 1)
                route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
        else
                route_action = mlxsw_sp_mr_route_action(rve->mr_route);
        if (route_action != rve->mr_route->route_action)
                mr->mr_ops->route_action_update(mlxsw_sp,
                                                rve->mr_route->route_priv,
                                                route_action);

        /* Delete the eRIF from the route */
        rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
        mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, rifi);
        rve->mr_route->route_action = route_action;
        mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}
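
/* Resolve a VIF: bind it to a netdev and (possibly) a RIF, then walk every
 * route that uses it as an iVIF or eVIF and refresh the hardware state. On
 * failure, all routes updated so far are rolled back to the unresolved state.
 */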
static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
                                   struct net_device *dev,
                                   struct mlxsw_sp_mr_vif *mr_vif,
                                   unsigned long vif_flags,
                                   const struct mlxsw_sp_rif *rif)
{
        struct mlxsw_sp_mr_route_vif_entry *irve, *erve;
        int err;

        /* Update the VIF */
        mr_vif->dev = dev;
        mr_vif->rif = rif;
        mr_vif->vif_flags = vif_flags;

        /* Update all routes where this VIF is used as an unresolved iRIF */
        list_for_each_entry(irve, &mr_vif->route_ivif_list, vif_node) {
                err = mlxsw_sp_mr_route_ivif_resolve(mr_table, irve);
                if (err)
                        goto err_irif_unresolve;
        }

        /* Update all routes where this VIF is used as an unresolved eRIF */
        list_for_each_entry(erve, &mr_vif->route_evif_list, vif_node) {
                err = mlxsw_sp_mr_route_evif_resolve(mr_table, erve);
                if (err)
                        goto err_erif_unresolve;
        }
        return 0;

err_erif_unresolve:
        list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
                                         vif_node)
                mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
err_irif_unresolve:
        list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
                                         vif_node)
                mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
        mr_vif->rif = NULL;
        return err;
}

static void mlxsw_sp_mr_vif_unresolve(struct mlxsw_sp_mr_table *mr_table,
                                      struct net_device *dev,
                                      struct mlxsw_sp_mr_vif *mr_vif)
{
        struct mlxsw_sp_mr_route_vif_entry *rve;

        /* Update all routes where this VIF is used as an eRIF */
        list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node)
                mlxsw_sp_mr_route_evif_unresolve(mr_table, rve);

        /* Update all routes where this VIF is used as an iRIF */
        list_for_each_entry(rve, &mr_vif->route_ivif_list, vif_node)
                mlxsw_sp_mr_route_ivif_unresolve(mr_table, rve);

        /* Update the VIF */
        mr_vif->dev = dev;
        mr_vif->rif = NULL;
}

int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
                        struct net_device *dev, vifi_t vif_index,
                        unsigned long vif_flags, const struct mlxsw_sp_rif *rif)
{
        struct mlxsw_sp_mr_vif *mr_vif;

        /* Validate the index before using it to address the VIF array */
        if (WARN_ON(vif_index >= MAXVIFS))
                return -EINVAL;
        mr_vif = &mr_table->vifs[vif_index];
        if (mr_vif->dev)
                return -EEXIST;
        return mlxsw_sp_mr_vif_resolve(mr_table, dev, mr_vif, vif_flags, rif);
}

void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index)
{
        struct mlxsw_sp_mr_vif *mr_vif;

        if (WARN_ON(vif_index >= MAXVIFS))
                return;
        mr_vif = &mr_table->vifs[vif_index];
        if (WARN_ON(!mr_vif->dev))
                return;
        mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif);
}

struct mlxsw_sp_mr_vif *
mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
                           const struct net_device *dev)
{
        vifi_t vif_index;

        for (vif_index = 0; vif_index < MAXVIFS; vif_index++)
                if (mr_table->vifs[vif_index].dev == dev)
                        return &mr_table->vifs[vif_index];
        return NULL;
}
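
/* React to RIF creation and removal: look up the VIF bound to the RIF's
 * netdev (if any) and resolve or unresolve it, so the routes using it are
 * reprogrammed accordingly.
 */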
int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
                        const struct mlxsw_sp_rif *rif)
{
        const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
        struct mlxsw_sp_mr_vif *mr_vif;

        if (!rif_dev)
                return 0;

        mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
        if (!mr_vif)
                return 0;
        return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif,
                                       mr_vif->vif_flags, rif);
}

void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
                         const struct mlxsw_sp_rif *rif)
{
        const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
        struct mlxsw_sp_mr_vif *mr_vif;

        if (!rif_dev)
                return;

        mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
        if (!mr_vif)
                return;
        mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif);
}

void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
                                const struct mlxsw_sp_rif *rif, int mtu)
{
        const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        struct mlxsw_sp_mr_route_vif_entry *rve;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
        struct mlxsw_sp_mr_vif *mr_vif;

        if (!rif_dev)
                return;

        /* Search for a VIF that uses that RIF */
        mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
        if (!mr_vif)
                return;

        /* Update all the routes that use that VIF as an eVIF */
        list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) {
                if (mtu < rve->mr_route->min_mtu) {
                        rve->mr_route->min_mtu = mtu;
                        mr->mr_ops->route_min_mtu_update(mlxsw_sp,
                                                         rve->mr_route->route_priv,
                                                         mtu);
                }
        }
}
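
/* Create a multicast routing table for a virtual router. Each table installs
 * a lowest-priority catch-all route, keyed only on the VRID, that traps any
 * multicast packet in the VRF which matches no offloaded route.
 */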
struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
                                                   u32 vr_id,
                                                   enum mlxsw_sp_l3proto proto)
{
        struct mlxsw_sp_mr_route_params catchall_route_params = {
                .prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
                .key = {
                        .vrid = vr_id,
                },
                .value = {
                        .route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
                }
        };
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
        struct mlxsw_sp_mr_table *mr_table;
        int err;
        int i;

        mr_table = kzalloc(sizeof(*mr_table) + mr->mr_ops->route_priv_size,
                           GFP_KERNEL);
        if (!mr_table)
                return ERR_PTR(-ENOMEM);

        mr_table->vr_id = vr_id;
        mr_table->mlxsw_sp = mlxsw_sp;
        mr_table->proto = proto;
        INIT_LIST_HEAD(&mr_table->route_list);

        err = rhashtable_init(&mr_table->route_ht,
                              &mlxsw_sp_mr_route_ht_params);
        if (err)
                goto err_route_rhashtable_init;

        for (i = 0; i < MAXVIFS; i++) {
                INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
                INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
        }

        err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
                                       mr_table->catchall_route_priv,
                                       &catchall_route_params);
        if (err)
                goto err_ops_route_create;
        list_add_tail(&mr_table->node, &mr->table_list);
        return mr_table;

err_ops_route_create:
        rhashtable_destroy(&mr_table->route_ht);
err_route_rhashtable_init:
        kfree(mr_table);
        return ERR_PTR(err);
}

void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
{
        struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

        WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
        list_del(&mr_table->node);
        mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
                                  &mr_table->catchall_route_priv);
        rhashtable_destroy(&mr_table->route_ht);
        kfree(mr_table);
}

void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
{
        struct mlxsw_sp_mr_route *mr_route, *tmp;
        int i;

        list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
                __mlxsw_sp_mr_route_del(mr_table, mr_route);

        for (i = 0; i < MAXVIFS; i++) {
                mr_table->vifs[i].dev = NULL;
                mr_table->vifs[i].rif = NULL;
        }
}

bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table)
{
        int i;

        for (i = 0; i < MAXVIFS; i++)
                if (mr_table->vifs[i].dev)
                        return false;
        return list_empty(&mr_table->route_list);
}

static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
                                           struct mlxsw_sp_mr_route *mr_route)
{
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
        u64 packets, bytes;

        if (mr_route->route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
                return;

        mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
                                &bytes);

        switch (mr_route->mr_table->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                if (mr_route->mfc4->mfc_un.res.pkt != packets)
                        mr_route->mfc4->mfc_un.res.lastuse = jiffies;
                mr_route->mfc4->mfc_un.res.pkt = packets;
                mr_route->mfc4->mfc_un.res.bytes = bytes;
                break;
        case MLXSW_SP_L3_PROTO_IPV6:
                /* fall through */
        default:
                WARN_ON_ONCE(1);
        }
}

static void mlxsw_sp_mr_stats_update(struct work_struct *work)
{
        struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
                                              stats_update_dw.work);
        struct mlxsw_sp_mr_table *mr_table;
        struct mlxsw_sp_mr_route *mr_route;
        unsigned long interval;

        rtnl_lock();
        list_for_each_entry(mr_table, &mr->table_list, node)
                list_for_each_entry(mr_route, &mr_table->route_list, node)
                        mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
                                                       mr_route);
        rtnl_unlock();

        interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
        mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
}

int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
                     const struct mlxsw_sp_mr_ops *mr_ops)
{
        struct mlxsw_sp_mr *mr;
        unsigned long interval;
        int err;

        mr = kzalloc(sizeof(*mr) + mr_ops->priv_size, GFP_KERNEL);
        if (!mr)
                return -ENOMEM;
        mr->mr_ops = mr_ops;
        mlxsw_sp->mr = mr;
        INIT_LIST_HEAD(&mr->table_list);

        err = mr_ops->init(mlxsw_sp, mr->priv);
        if (err)
                goto err;

        /* Create the delayed work for counter updates */
        INIT_DELAYED_WORK(&mr->stats_update_dw, mlxsw_sp_mr_stats_update);
        interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
        mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
        return 0;
err:
        kfree(mr);
        return err;
}

void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

        cancel_delayed_work_sync(&mr->stats_update_dw);
        mr->mr_ops->fini(mr->priv);
        kfree(mr);
}