/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
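/* Overview: common multicast routing offload logic for the Spectrum ASIC.
 * This file mirrors the kernel's multicast VIF table and MFC cache into a
 * protocol-agnostic ops interface (struct mlxsw_sp_mr_ops) whose
 * implementation programs the actual hardware tables.
 */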
#include <linux/rhashtable.h>
#include <net/ipv6.h>

#include "spectrum_mr.h"
#include "spectrum_router.h"

struct mlxsw_sp_mr {
	const struct mlxsw_sp_mr_ops *mr_ops;
	void *catchall_route_priv;
	struct delayed_work stats_update_dw;
	struct list_head table_list;
#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_sp_mr_vif;
struct mlxsw_sp_mr_vif_ops {
	bool (*is_regular)(const struct mlxsw_sp_mr_vif *vif);
};

struct mlxsw_sp_mr_vif {
	struct net_device *dev;
	const struct mlxsw_sp_rif *rif;
	unsigned long vif_flags;

	/* A list of route_vif_entry structs that point to routes in which
	 * this VIF instance is used as one of the egress VIFs
	 */
	struct list_head route_evif_list;

	/* A list of route_vif_entry structs that point to routes in which
	 * this VIF instance is used as the ingress VIF
	 */
	struct list_head route_ivif_list;

	/* Protocol specific operations for a VIF */
	const struct mlxsw_sp_mr_vif_ops *ops;
};

struct mlxsw_sp_mr_route_vif_entry {
	struct list_head vif_node;
	struct list_head route_node;
	struct mlxsw_sp_mr_vif *mr_vif;
	struct mlxsw_sp_mr_route *mr_route;
};

struct mlxsw_sp_mr_table;
struct mlxsw_sp_mr_table_ops {
	bool (*is_route_valid)(const struct mlxsw_sp_mr_table *mr_table,
			       const struct mr_mfc *mfc);
	void (*key_create)(struct mlxsw_sp_mr_table *mr_table,
			   struct mlxsw_sp_mr_route_key *key,
			   struct mr_mfc *mfc);
	bool (*is_route_starg)(const struct mlxsw_sp_mr_table *mr_table,
			       const struct mlxsw_sp_mr_route *mr_route);
};

struct mlxsw_sp_mr_table {
	struct list_head node;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp *mlxsw_sp;
	u32 vr_id;
	struct mlxsw_sp_mr_vif vifs[MAXVIFS];
	struct list_head route_list;
	struct rhashtable route_ht;
	const struct mlxsw_sp_mr_table_ops *ops;
	char catchall_route_priv[0];
	/* catchall_route_priv has to be always the last item */
};

struct mlxsw_sp_mr_route {
	struct list_head node;
	struct rhash_head ht_node;
	struct mlxsw_sp_mr_route_key key;
	enum mlxsw_sp_mr_route_action route_action;
	u16 min_mtu;
	struct mr_mfc *mfc;
	void *route_priv;
	const struct mlxsw_sp_mr_table *mr_table;
	/* A list of route_vif_entry structs that point to the egress VIFs */
	struct list_head evif_list;
	/* A route_vif_entry struct that points to the ingress VIF */
	struct mlxsw_sp_mr_route_vif_entry ivif;
};

static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_mr_route_key),
	.key_offset = offsetof(struct mlxsw_sp_mr_route, key),
	.head_offset = offsetof(struct mlxsw_sp_mr_route, ht_node),
	.automatic_shrinking = true,
};

static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
{
	return vif->ops->is_regular(vif) && vif->dev && vif->rif;
}

static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
{
	return vif->dev;
}

static bool
mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
{
	vifi_t ivif = mr_route->mfc->mfc_parent;

	return mr_route->mfc->mfc_un.res.ttls[ivif] != 255;
}
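/* Count the eVIFs that can actually be offloaded: only a VIF that is
 * regular (neither a tunnel nor a pimreg device) and is backed by both a
 * netdevice and a RIF can be programmed as an egress RIF.
 */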
static int
mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;
	int valid_evifs;

	valid_evifs = 0;
	list_for_each_entry(rve, &mr_route->evif_list, route_node)
		if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
			valid_evifs++;
	return valid_evifs;
}
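/* Decide how the hardware should handle a route. The ladder below falls
 * back to trapping whenever the hardware cannot fully forward: an
 * unresolved or irregular iVIF, a (*,G) route the kernel itself would not
 * match, or a route with no valid eVIFs at all.
 */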
static enum mlxsw_sp_mr_route_action
mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	/* If the ingress VIF is not both regular and resolved, trap the
	 * route.
	 */
	if (!mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* The kernel does not match a (*,G) route whose ingress interface
	 * is not one of the egress interfaces, so trap such routes.
	 */
	if (mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
						    mr_route) &&
	    !mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* If the route has no valid eVIFs, trap it. */
	if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route))
		return MLXSW_SP_MR_ROUTE_ACTION_TRAP;

	/* If one of the eVIFs has no RIF, trap-and-forward the route, as
	 * there is some more routing to do in software too.
	 */
	list_for_each_entry(rve, &mr_route->evif_list, route_node)
		if (mlxsw_sp_mr_vif_exists(rve->mr_vif) && !rve->mr_vif->rif)
			return MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD;

	return MLXSW_SP_MR_ROUTE_ACTION_FORWARD;
}

static enum mlxsw_sp_mr_route_prio
mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
{
	return mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
						       mr_route) ?
	       MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
}

static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
				       struct mlxsw_sp_mr_vif *mr_vif)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	rve = kzalloc(sizeof(*rve), GFP_KERNEL);
	if (!rve)
		return -ENOMEM;
	rve->mr_route = mr_route;
	rve->mr_vif = mr_vif;
	list_add_tail(&rve->route_node, &mr_route->evif_list);
	list_add_tail(&rve->vif_node, &mr_vif->route_evif_list);
	return 0;
}

static void
mlxsw_sp_mr_route_evif_unlink(struct mlxsw_sp_mr_route_vif_entry *rve)
{
	list_del(&rve->route_node);
	list_del(&rve->vif_node);
	kfree(rve);
}

static void mlxsw_sp_mr_route_ivif_link(struct mlxsw_sp_mr_route *mr_route,
					struct mlxsw_sp_mr_vif *mr_vif)
{
	mr_route->ivif.mr_route = mr_route;
	mr_route->ivif.mr_vif = mr_vif;
	list_add_tail(&mr_route->ivif.vif_node, &mr_vif->route_ivif_list);
}

static void mlxsw_sp_mr_route_ivif_unlink(struct mlxsw_sp_mr_route *mr_route)
{
	list_del(&mr_route->ivif.vif_node);
}

static int
mlxsw_sp_mr_route_info_create(struct mlxsw_sp_mr_table *mr_table,
			      struct mlxsw_sp_mr_route *mr_route,
			      struct mlxsw_sp_mr_route_info *route_info)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;
	u16 *erif_indices;
	u16 irif_index;
	u16 erif = 0;

	erif_indices = kmalloc_array(MAXVIFS, sizeof(*erif_indices),
				     GFP_KERNEL);
	if (!erif_indices)
		return -ENOMEM;

	list_for_each_entry(rve, &mr_route->evif_list, route_node) {
		if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
			u16 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);

			erif_indices[erif++] = rifi;
		}
	}

	if (mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
		irif_index = mlxsw_sp_rif_index(mr_route->ivif.mr_vif->rif);
	else
		irif_index = 0;

	route_info->irif_index = irif_index;
	route_info->erif_indices = erif_indices;
	route_info->min_mtu = mr_route->min_mtu;
	route_info->route_action = mr_route->route_action;
	route_info->erif_num = erif;
	return 0;
}

static void
mlxsw_sp_mr_route_info_destroy(struct mlxsw_sp_mr_route_info *route_info)
{
	kfree(route_info->erif_indices);
}

static int mlxsw_sp_mr_route_write(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route *mr_route,
				   bool replace)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr_route_info route_info;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	int err;

	err = mlxsw_sp_mr_route_info_create(mr_table, mr_route, &route_info);
	if (err)
		return err;

	if (!replace) {
		struct mlxsw_sp_mr_route_params route_params;

		mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size,
					       GFP_KERNEL);
		if (!mr_route->route_priv) {
			err = -ENOMEM;
			goto out;
		}

		route_params.key = mr_route->key;
		route_params.value = route_info;
		route_params.prio = mlxsw_sp_mr_route_prio(mr_route);
		err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
					       mr_route->route_priv,
					       &route_params);
		if (err)
			kfree(mr_route->route_priv);
	} else {
		err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv,
					       &route_info);
	}
out:
	mlxsw_sp_mr_route_info_destroy(&route_info);
	return err;
}

static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
				    struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv);
	kfree(mr_route->route_priv);
}
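/* In the kernel's MFC entry, ttls[i] == 255 means VIF i is not part of
 * the egress set; any other value is that VIF's TTL threshold. Only the
 * egress membership is mirrored here; the per-VIF TTL threshold itself
 * is not programmed to the hardware by this code.
 */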
static struct mlxsw_sp_mr_route *
mlxsw_sp_mr_route_create(struct mlxsw_sp_mr_table *mr_table,
			 struct mr_mfc *mfc)
{
	struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
	struct mlxsw_sp_mr_route *mr_route;
	int err = 0;
	int i;

	/* Allocate and init a new route and fill it with parameters */
	mr_route = kzalloc(sizeof(*mr_route), GFP_KERNEL);
	if (!mr_route)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&mr_route->evif_list);

	/* Find min_mtu and link iVIF and eVIFs */
	mr_route->min_mtu = ETH_MAX_MTU;
	mr_cache_hold(mfc);
	mr_route->mfc = mfc;
	mr_table->ops->key_create(mr_table, &mr_route->key, mr_route->mfc);

	mr_route->mr_table = mr_table;
	for (i = 0; i < MAXVIFS; i++) {
		if (mfc->mfc_un.res.ttls[i] != 255) {
			err = mlxsw_sp_mr_route_evif_link(mr_route,
							  &mr_table->vifs[i]);
			if (err)
				goto err;
			if (mr_table->vifs[i].dev &&
			    mr_table->vifs[i].dev->mtu < mr_route->min_mtu)
				mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
		}
	}
	mlxsw_sp_mr_route_ivif_link(mr_route,
				    &mr_table->vifs[mfc->mfc_parent]);

	mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
	return mr_route;
err:
	mr_cache_put(mfc);
	list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
		mlxsw_sp_mr_route_evif_unlink(rve);
	kfree(mr_route);
	return ERR_PTR(err);
}

static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
				      struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;

	mlxsw_sp_mr_route_ivif_unlink(mr_route);
	mr_cache_put(mr_route->mfc);
	list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
		mlxsw_sp_mr_route_evif_unlink(rve);
	kfree(mr_route);
}

static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
					bool offload)
{
	if (offload)
		mr_route->mfc->mfc_flags |= MFC_OFFLOAD;
	else
		mr_route->mfc->mfc_flags &= ~MFC_OFFLOAD;
}

static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
{
	bool offload;

	offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	mlxsw_sp_mr_mfc_offload_set(mr_route, offload);
}

static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
				    struct mlxsw_sp_mr_route *mr_route)
{
	mlxsw_sp_mr_mfc_offload_set(mr_route, false);
	mlxsw_sp_mr_route_erase(mr_table, mr_route);
	rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
			       mlxsw_sp_mr_route_ht_params);
	list_del(&mr_route->node);
	mlxsw_sp_mr_route_destroy(mr_table, mr_route);
}
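/* Entry points for MFC add/del notifications (the callers live outside
 * this file). With replace=true the kernel is replacing an existing
 * cache entry: the new route inherits the original route_priv, and the
 * low-level route_update op patches the hardware entry in place rather
 * than allocating a new one.
 */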
int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
			  struct mr_mfc *mfc, bool replace)
{
	struct mlxsw_sp_mr_route *mr_orig_route = NULL;
	struct mlxsw_sp_mr_route *mr_route;
	int err;

	if (!mr_table->ops->is_route_valid(mr_table, mfc))
		return -EINVAL;

	/* Create a new route */
	mr_route = mlxsw_sp_mr_route_create(mr_table, mfc);
	if (IS_ERR(mr_route))
		return PTR_ERR(mr_route);

	/* Find any route with a matching key */
	mr_orig_route = rhashtable_lookup_fast(&mr_table->route_ht,
					       &mr_route->key,
					       mlxsw_sp_mr_route_ht_params);
	if (replace) {
		/* In the replace case, make the route point to the new
		 * route_priv.
		 */
		if (WARN_ON(!mr_orig_route)) {
			err = -ENOENT;
			goto err_no_orig_route;
		}
		mr_route->route_priv = mr_orig_route->route_priv;
	} else if (mr_orig_route) {
		/* In the non-replace case, if another route with the same
		 * key was found, abort, as duplicate routes are used for
		 * proxy routes.
		 */
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		err = -EINVAL;
		goto err_duplicate_route;
	}

	/* Put it in the table data-structures */
	list_add_tail(&mr_route->node, &mr_table->route_list);
	err = rhashtable_insert_fast(&mr_table->route_ht,
				     &mr_route->ht_node,
				     mlxsw_sp_mr_route_ht_params);
	if (err)
		goto err_rhashtable_insert;

	/* Write the route to the hardware */
	err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
	if (err)
		goto err_mr_route_write;

	/* Destroy the original route */
	if (replace) {
		rhashtable_remove_fast(&mr_table->route_ht,
				       &mr_orig_route->ht_node,
				       mlxsw_sp_mr_route_ht_params);
		list_del(&mr_orig_route->node);
		mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route);
	}

	mlxsw_sp_mr_mfc_offload_update(mr_route);
	return 0;

err_mr_route_write:
	rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
			       mlxsw_sp_mr_route_ht_params);
err_rhashtable_insert:
	list_del(&mr_route->node);
err_no_orig_route:
err_duplicate_route:
	mlxsw_sp_mr_route_destroy(mr_table, mr_route);
	return err;
}

void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
			   struct mr_mfc *mfc)
{
	struct mlxsw_sp_mr_route *mr_route;
	struct mlxsw_sp_mr_route_key key;

	mr_table->ops->key_create(mr_table, &key, mfc);
	mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
					  mlxsw_sp_mr_route_ht_params);
	if (mr_route)
		__mlxsw_sp_mr_route_del(mr_table, mr_route);
}
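/* A VIF becomes usable for offload only once both its netdevice and its
 * RIF are known. The resolve/unresolve helpers below walk the routes
 * linked to a VIF and promote them to hardware forwarding or demote
 * them back to trapping accordingly.
 */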
/* Should be called after the VIF struct is updated */
static int
mlxsw_sp_mr_route_ivif_resolve(struct mlxsw_sp_mr_table *mr_table,
			       struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 irif_index;
	int err;

	route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return 0;

	/* rve->mr_vif->rif is guaranteed to be valid at this stage */
	irif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
	err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv,
					    irif_index);
	if (err)
		return err;

	err = mr->mr_ops->route_action_update(mlxsw_sp,
					      rve->mr_route->route_priv,
					      route_action);
	if (err)
		/* No need to rollback here because the iRIF change only
		 * takes place after the action has been updated.
		 */
		return err;

	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
	return 0;
}

static void
mlxsw_sp_mr_route_ivif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				 struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv,
					MLXSW_SP_MR_ROUTE_ACTION_TRAP);
	rve->mr_route->route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}
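/* The route action is recomputed before the new eRIF is programmed
 * because the added eVIF may change it (e.g. an eVIF that exists but has
 * no RIF forces trap-and-forward); the error path below unwinds in the
 * reverse order.
 */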
/* Should be called after the RIF struct is updated */
static int
mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
			       struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 erif_index = 0;
	int err;

	/* Update the route action, as the new eVIF can be a tunnel or a
	 * pimreg device, which will require updating the action.
	 */
	route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action != rve->mr_route->route_action) {
		err = mr->mr_ops->route_action_update(mlxsw_sp,
						      rve->mr_route->route_priv,
						      route_action);
		if (err)
			return err;
	}

	/* Add the eRIF */
	if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
		erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
		err = mr->mr_ops->route_erif_add(mlxsw_sp,
						 rve->mr_route->route_priv,
						 erif_index);
		if (err)
			goto err_route_erif_add;
	}

	/* Update the minimum MTU */
	if (rve->mr_vif->dev->mtu < rve->mr_route->min_mtu) {
		rve->mr_route->min_mtu = rve->mr_vif->dev->mtu;
		err = mr->mr_ops->route_min_mtu_update(mlxsw_sp,
						       rve->mr_route->route_priv,
						       rve->mr_route->min_mtu);
		if (err)
			goto err_route_min_mtu_update;
	}

	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
	return 0;

err_route_min_mtu_update:
	if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
		mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
					   erif_index);
err_route_erif_add:
	if (route_action != rve->mr_route->route_action)
		mr->mr_ops->route_action_update(mlxsw_sp,
						rve->mr_route->route_priv,
						rve->mr_route->route_action);
	return err;
}

/* Should be called before the RIF struct is updated */
static void
mlxsw_sp_mr_route_evif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				 struct mlxsw_sp_mr_route_vif_entry *rve)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	enum mlxsw_sp_mr_route_action route_action;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u16 rifi;

	/* If the VIF being unresolved was not valid, there is no eRIF to
	 * delete.
	 */
	if (!mlxsw_sp_mr_vif_valid(rve->mr_vif))
		return;

	/* Update the route action: if there is only one valid eVIF in the
	 * route, set the action to trap, as the VIF deletion will lead to
	 * zero valid eVIFs. In any other case, use mlxsw_sp_mr_route_action()
	 * to determine the route action.
	 */
	if (mlxsw_sp_mr_route_valid_evifs_num(rve->mr_route) == 1)
		route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
	else
		route_action = mlxsw_sp_mr_route_action(rve->mr_route);
	if (route_action != rve->mr_route->route_action)
		mr->mr_ops->route_action_update(mlxsw_sp,
						rve->mr_route->route_priv,
						route_action);

	/* Delete the eRIF from the route */
	rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
	mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, rifi);
	rve->mr_route->route_action = route_action;
	mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
}

static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
				   struct net_device *dev,
				   struct mlxsw_sp_mr_vif *mr_vif,
				   unsigned long vif_flags,
				   const struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_mr_route_vif_entry *irve, *erve;
	int err;

	/* Update the VIF */
	mr_vif->dev = dev;
	mr_vif->rif = rif;
	mr_vif->vif_flags = vif_flags;

	/* Update all routes where this VIF is used as an unresolved iRIF */
	list_for_each_entry(irve, &mr_vif->route_ivif_list, vif_node) {
		err = mlxsw_sp_mr_route_ivif_resolve(mr_table, irve);
		if (err)
			goto err_irif_unresolve;
	}

	/* Update all routes where this VIF is used as an unresolved eRIF */
	list_for_each_entry(erve, &mr_vif->route_evif_list, vif_node) {
		err = mlxsw_sp_mr_route_evif_resolve(mr_table, erve);
		if (err)
			goto err_erif_unresolve;
	}
	return 0;

err_erif_unresolve:
	list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
					 vif_node)
		mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
err_irif_unresolve:
	list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
					 vif_node)
		mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
	mr_vif->rif = NULL;
	return err;
}

static void mlxsw_sp_mr_vif_unresolve(struct mlxsw_sp_mr_table *mr_table,
				      struct net_device *dev,
				      struct mlxsw_sp_mr_vif *mr_vif)
{
	struct mlxsw_sp_mr_route_vif_entry *rve;

	/* Unresolve all routes where this VIF is used as an eRIF */
	list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node)
		mlxsw_sp_mr_route_evif_unresolve(mr_table, rve);

	/* Unresolve all routes where this VIF is used as an iRIF */
	list_for_each_entry(rve, &mr_vif->route_ivif_list, vif_node)
		mlxsw_sp_mr_route_ivif_unresolve(mr_table, rve);

	/* Update the VIF */
	mr_vif->dev = dev;
	mr_vif->rif = NULL;
}

int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
			struct net_device *dev, vifi_t vif_index,
			unsigned long vif_flags, const struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_mr_vif *mr_vif;

	if (WARN_ON(vif_index >= MAXVIFS))
		return -EINVAL;
	mr_vif = &mr_table->vifs[vif_index];
	if (mr_vif->dev)
		return -EEXIST;
	return mlxsw_sp_mr_vif_resolve(mr_table, dev, mr_vif, vif_flags, rif);
}

void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index)
{
	struct mlxsw_sp_mr_vif *mr_vif;

	if (WARN_ON(vif_index >= MAXVIFS))
		return;
	mr_vif = &mr_table->vifs[vif_index];
	if (WARN_ON(!mr_vif->dev))
		return;
	mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif);
}
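/* A linear scan is acceptable here: MAXVIFS is small (32) and RIF
 * notifications are infrequent control-path events.
 */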
static struct mlxsw_sp_mr_vif *
mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
			   const struct net_device *dev)
{
	vifi_t vif_index;

	for (vif_index = 0; vif_index < MAXVIFS; vif_index++)
		if (mr_table->vifs[vif_index].dev == dev)
			return &mr_table->vifs[vif_index];
	return NULL;
}

int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
			const struct mlxsw_sp_rif *rif)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return 0;

	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return 0;
	return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif,
				       mr_vif->vif_flags, rif);
}

void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
			 const struct mlxsw_sp_rif *rif)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return;

	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return;
	mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif);
}

void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
				const struct mlxsw_sp_rif *rif, int mtu)
{
	const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr_route_vif_entry *rve;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	struct mlxsw_sp_mr_vif *mr_vif;

	if (!rif_dev)
		return;

	/* Search for a VIF that uses that RIF */
	mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
	if (!mr_vif)
		return;

	/* Update all the routes that use that VIF as an eVIF */
	list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) {
		if (mtu < rve->mr_route->min_mtu) {
			rve->mr_route->min_mtu = mtu;
			mr->mr_ops->route_min_mtu_update(mlxsw_sp,
							 rve->mr_route->route_priv,
							 mtu);
		}
	}
}
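/* Route keys encode (*,G) versus (S,G) through the source mask. As a
 * hypothetical example, an IPv4 (S,G) route 198.51.100.1 -> 239.1.1.1 in
 * VRF 7 is keyed as { vrid = 7, group = 239.1.1.1, group_mask = /32,
 * source = 198.51.100.1, source_mask = /32 }, while the matching (*,G)
 * route carries source 0.0.0.0 with an all-zero source mask.
 */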
/* Protocol specific functions */
static bool
mlxsw_sp_mr_route4_validate(const struct mlxsw_sp_mr_table *mr_table,
			    const struct mr_mfc *c)
{
	struct mfc_cache *mfc = (struct mfc_cache *) c;

	/* If the route is a (*,*) route, abort, as this kind of route is
	 * used for proxy routes.
	 */
	if (mfc->mfc_origin == htonl(INADDR_ANY) &&
	    mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		return false;
	}
	return true;
}

static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route_key *key,
				   struct mr_mfc *c)
{
	const struct mfc_cache *mfc = (struct mfc_cache *) c;
	bool starg;

	starg = (mfc->mfc_origin == htonl(INADDR_ANY));

	memset(key, 0, sizeof(*key));
	key->vrid = mr_table->vr_id;
	key->proto = MLXSW_SP_L3_PROTO_IPV4;
	key->group.addr4 = mfc->mfc_mcastgrp;
	key->group_mask.addr4 = htonl(0xffffffff);
	key->source.addr4 = mfc->mfc_origin;
	key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff);
}

static bool mlxsw_sp_mr_route4_starg(const struct mlxsw_sp_mr_table *mr_table,
				     const struct mlxsw_sp_mr_route *mr_route)
{
	return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
}

static bool mlxsw_sp_mr_vif4_is_regular(const struct mlxsw_sp_mr_vif *vif)
{
	return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
}

static bool
mlxsw_sp_mr_route6_validate(const struct mlxsw_sp_mr_table *mr_table,
			    const struct mr_mfc *c)
{
	struct mfc6_cache *mfc = (struct mfc6_cache *) c;

	/* If the route is a (*,*) route, abort, as this kind of route is
	 * used for proxy routes.
	 */
	if (ipv6_addr_any(&mfc->mf6c_origin) &&
	    ipv6_addr_any(&mfc->mf6c_mcastgrp)) {
		dev_warn(mr_table->mlxsw_sp->bus_info->dev,
			 "Offloading proxy routes is not supported.\n");
		return false;
	}
	return true;
}

static void mlxsw_sp_mr_route6_key(struct mlxsw_sp_mr_table *mr_table,
				   struct mlxsw_sp_mr_route_key *key,
				   struct mr_mfc *c)
{
	const struct mfc6_cache *mfc = (struct mfc6_cache *) c;

	memset(key, 0, sizeof(*key));
	key->vrid = mr_table->vr_id;
	key->proto = MLXSW_SP_L3_PROTO_IPV6;
	key->group.addr6 = mfc->mf6c_mcastgrp;
	memset(&key->group_mask.addr6, 0xff, sizeof(key->group_mask.addr6));
	key->source.addr6 = mfc->mf6c_origin;
	if (!ipv6_addr_any(&mfc->mf6c_origin))
		memset(&key->source_mask.addr6, 0xff,
		       sizeof(key->source_mask.addr6));
}

static bool mlxsw_sp_mr_route6_starg(const struct mlxsw_sp_mr_table *mr_table,
				     const struct mlxsw_sp_mr_route *mr_route)
{
	return ipv6_addr_any(&mr_route->key.source_mask.addr6);
}

static bool mlxsw_sp_mr_vif6_is_regular(const struct mlxsw_sp_mr_vif *vif)
{
	return !(vif->vif_flags & MIFF_REGISTER);
}

/* Indexed by enum mlxsw_sp_l3proto: IPv4 first, IPv6 second */
static struct mlxsw_sp_mr_vif_ops mlxsw_sp_mr_vif_ops_arr[] = {
	{
		.is_regular = mlxsw_sp_mr_vif4_is_regular,
	},
	{
		.is_regular = mlxsw_sp_mr_vif6_is_regular,
	},
};

static struct mlxsw_sp_mr_table_ops mlxsw_sp_mr_table_ops_arr[] = {
	{
		.is_route_valid = mlxsw_sp_mr_route4_validate,
		.key_create = mlxsw_sp_mr_route4_key,
		.is_route_starg = mlxsw_sp_mr_route4_starg,
	},
	{
		.is_route_valid = mlxsw_sp_mr_route6_validate,
		.key_create = mlxsw_sp_mr_route6_key,
		.is_route_starg = mlxsw_sp_mr_route6_starg,
	},
};
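/* Every table is created with a lowest-priority catch-all route that
 * traps to the CPU, so multicast packets which miss all offloaded routes
 * are still delivered to the kernel for resolution.
 */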
struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
						   u32 vr_id,
						   enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_mr_route_params catchall_route_params = {
		.prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
		.key = {
			.vrid = vr_id,
			.proto = proto,
		},
		.value = {
			.route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
		}
	};
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	struct mlxsw_sp_mr_table *mr_table;
	int err;
	int i;

	mr_table = kzalloc(sizeof(*mr_table) + mr->mr_ops->route_priv_size,
			   GFP_KERNEL);
	if (!mr_table)
		return ERR_PTR(-ENOMEM);

	mr_table->vr_id = vr_id;
	mr_table->mlxsw_sp = mlxsw_sp;
	mr_table->proto = proto;
	mr_table->ops = &mlxsw_sp_mr_table_ops_arr[proto];
	INIT_LIST_HEAD(&mr_table->route_list);

	err = rhashtable_init(&mr_table->route_ht,
			      &mlxsw_sp_mr_route_ht_params);
	if (err)
		goto err_route_rhashtable_init;

	for (i = 0; i < MAXVIFS; i++) {
		INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
		INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
		mr_table->vifs[i].ops = &mlxsw_sp_mr_vif_ops_arr[proto];
	}

	err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
				       mr_table->catchall_route_priv,
				       &catchall_route_params);
	if (err)
		goto err_ops_route_create;
	list_add_tail(&mr_table->node, &mr->table_list);
	return mr_table;

err_ops_route_create:
	rhashtable_destroy(&mr_table->route_ht);
err_route_rhashtable_init:
	kfree(mr_table);
	return ERR_PTR(err);
}

void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
{
	struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
	list_del(&mr_table->node);
	mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
				  &mr_table->catchall_route_priv);
	rhashtable_destroy(&mr_table->route_ht);
	kfree(mr_table);
}

void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
{
	struct mlxsw_sp_mr_route *mr_route, *tmp;
	int i;

	list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
		__mlxsw_sp_mr_route_del(mr_table, mr_route);

	for (i = 0; i < MAXVIFS; i++) {
		mr_table->vifs[i].dev = NULL;
		mr_table->vifs[i].rif = NULL;
	}
}

bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table)
{
	int i;

	for (i = 0; i < MAXVIFS; i++)
		if (mr_table->vifs[i].dev)
			return false;
	return list_empty(&mr_table->route_list);
}

static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_mr_route *mr_route)
{
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
	u64 packets, bytes;

	if (mr_route->route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
		return;

	mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
				&bytes);

	if (mr_route->mfc->mfc_un.res.pkt != packets)
		mr_route->mfc->mfc_un.res.lastuse = jiffies;
	mr_route->mfc->mfc_un.res.pkt = packets;
	mr_route->mfc->mfc_un.res.bytes = bytes;
}
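/* Periodic statistics sync: every 5s (under RTNL), hardware counters are
 * copied into the kernel's mr_mfc counters so that MFC dumps stay
 * accurate for offloaded routes; lastuse is only refreshed when the
 * packet count actually advanced.
 */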
static void mlxsw_sp_mr_stats_update(struct work_struct *work)
{
	struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
					      stats_update_dw.work);
	struct mlxsw_sp_mr_table *mr_table;
	struct mlxsw_sp_mr_route *mr_route;
	unsigned long interval;

	rtnl_lock();
	list_for_each_entry(mr_table, &mr->table_list, node)
		list_for_each_entry(mr_route, &mr_table->route_list, node)
			mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
						       mr_route);
	rtnl_unlock();

	interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
	mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
}

int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
		     const struct mlxsw_sp_mr_ops *mr_ops)
{
	struct mlxsw_sp_mr *mr;
	unsigned long interval;
	int err;

	mr = kzalloc(sizeof(*mr) + mr_ops->priv_size, GFP_KERNEL);
	if (!mr)
		return -ENOMEM;
	mr->mr_ops = mr_ops;
	mlxsw_sp->mr = mr;
	INIT_LIST_HEAD(&mr->table_list);

	err = mr_ops->init(mlxsw_sp, mr->priv);
	if (err)
		goto err;

	/* Create the delayed work for counter updates */
	INIT_DELAYED_WORK(&mr->stats_update_dw, mlxsw_sp_mr_stats_update);
	interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
	mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
	return 0;

err:
	kfree(mr);
	return err;
}

void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_mr *mr = mlxsw_sp->mr;

	cancel_delayed_work_sync(&mr->stats_update_dw);
	mr->mr_ops->fini(mr->priv);
	kfree(mr);
}