/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
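/* Core ACL support for the Spectrum ASIC: rulesets (a hash table keyed by
 * bound device, direction and chain index), rules (a per-ruleset hash table
 * keyed by TC cookie), flex-key/flex-action plumbing, and periodic polling
 * of rule activity. The TCAM specifics live behind mlxsw_sp_acl_ops.
 */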

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_flex_keys.h"

struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_sp_fid *dummy_fid;
	const struct mlxsw_sp_acl_ops *ops;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

struct mlxsw_sp_acl_ruleset_ht_key {
	struct net_device *dev; /* dev this ruleset is bound to */
	bool ingress;
	u32 chain_index;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	unsigned int ref_count;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};

struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}

static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
	if (err)
		goto err_ops_ruleset_add;

	return ruleset;

err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}
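/* Ruleset lifecycle: a ruleset is created with a reference count of one,
 * shared via mlxsw_sp_acl_ruleset_ref_inc() and torn down (unbound and
 * destroyed) once the last reference is dropped in
 * mlxsw_sp_acl_ruleset_ref_dec().
 */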
static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_ruleset *ruleset,
				     struct net_device *dev, bool ingress,
				     u32 chain_index)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	int err;

	ruleset->ht_key.dev = dev;
	ruleset->ht_key.ingress = ingress;
	ruleset->ht_key.chain_index = chain_index;
	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		return err;
	if (!ruleset->ht_key.chain_index) {
		/* We only need ruleset with chain index 0, the implicit one,
		 * to be directly bound to device. The rest of the rulesets
		 * are bound by "Goto action set".
		 */
		err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
		if (err)
			goto err_ops_ruleset_bind;
	}
	return 0;

err_ops_ruleset_bind:
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	return err;
}

static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	if (!ruleset->ht_key.chain_index)
		ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
			      bool ingress, u32 chain_index,
			      const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.dev = dev;
	ht_key.ingress = ingress;
	ht_key.chain_index = chain_index;
	ht_key.ops = ops;
	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
				      mlxsw_sp_acl_ruleset_ht_params);
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
			    bool ingress, u32 chain_index,
			    enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = acl->ops->profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);
	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
						chain_index, ops);
	if (!ruleset)
		return ERR_PTR(-ENOENT);
	return ruleset;
}
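/* Get-or-create: reuse a matching bound ruleset when one already exists,
 * otherwise create a fresh one and bind it. Callers pair this with
 * mlxsw_sp_acl_ruleset_put() to release their reference.
 */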
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
			 bool ingress, u32 chain_index,
			 enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	int err;

	ops = acl->ops->profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
						chain_index, ops);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
	if (IS_ERR(ruleset))
		return ruleset;
	err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev,
					ingress, chain_index);
	if (err)
		goto err_ruleset_bind;
	return ruleset;

err_ruleset_bind:
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_group_id(ruleset->priv);
}

static int
mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei)
{
	int err;

	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
	if (err)
		return err;
	rulei->counter_valid = true;
	return 0;
}

static void
mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei)
{
	rulei->counter_valid = false;
	mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return ERR_PTR(-ENOMEM); /* callers check with IS_ERR() */
	rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}
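/* The mlxsw_sp_acl_rulei_act_*() helpers below append individual actions
 * to a rule's flex-action block. The block is finalized via
 * mlxsw_sp_acl_rulei_commit() before the rule is installed.
 */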
int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_continue(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				u16 group_id)
{
	return mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_drop(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block,
					   MLXSW_TRAP_ID_ACL0);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev))
			return -EINVAL;
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp)
			return -EINVAL;
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to
		 * set forward to ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port);
}

int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio)
{
	u8 ethertype;

	if (action == TCA_VLAN_ACT_MODIFY) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype);
	} else {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_counter(rulei->act_block,
					      rulei->counter_index);
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
	if (err)
		goto err_counter_alloc;
	return rule;

err_counter_alloc:
	mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
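/* Install order matters: the rule is programmed into hardware first and
 * only then added to the rule hash table and the activity list, so any
 * rule visible in the software structures is also present in hardware.
 * Removal runs in the reverse order.
 */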
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	return 0;

err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	list_del(&rule->list);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	rtnl_unlock();
	return 0;

err_rule_update:
	rtnl_unlock();
	return err;
}

static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *last_use)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets;
	u64 current_bytes;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
					&current_packets, &current_bytes);
	if (err)
		return err;

	/* Report deltas since the last query and remember the new totals. */
	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;

	return 0;
}
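/* Init creates the flex-key registry, the ruleset hash table and the dummy
 * FID, hands profile-specific setup to the ops implementation and kicks off
 * the periodic activity poll. Fini undoes all of this in reverse order.
 */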
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	int err;

	acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp_afk_blocks,
				    MLXSW_SP_AFK_BLOCKS_COUNT);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	err = acl_ops->init(mlxsw_sp, acl->priv);
	if (err)
		goto err_acl_ops_init;

	acl->ops = acl_ops;

	/* Create the delayed work for the rule activity update */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval =
			MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;

	cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
	acl_ops->fini(mlxsw_sp, acl->priv);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}