/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/parman.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"

struct mlxsw_sp_acl_tcam {
	unsigned long *used_regions; /* bit array */
	unsigned int max_regions;
	unsigned long *used_groups; /* bit array */
	unsigned int max_groups;
	unsigned int max_group_size;
};

static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_acl_tcam *tcam = priv;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	size_t alloc_size;
	int err;

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
	tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_regions)
		return -ENOMEM;
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
	tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						  ACL_MAX_GROUP_SIZE);
	return 0;

err_alloc_used_groups:
	kfree(tcam->used_regions);
	return err;
}

static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_acl_tcam *tcam = priv;

	kfree(tcam->used_groups);
	kfree(tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
					   u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
	if (id < tcam->max_regions) {
		__set_bit(id, tcam->used_regions);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
					  u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
	if (id < tcam->max_groups) {
		__set_bit(id, tcam->used_groups);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}

struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
	struct list_head region_list;
	unsigned int region_count;
	struct rhashtable chunk_ht;
	struct {
		u16 local_port;
		bool ingress;
	} bound;
	struct mlxsw_sp_acl_tcam_group_ops *ops;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
};

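/* Objects in this file form a hierarchy: a group is what gets bound to
 * a port and holds a priority-ordered list of regions; a region is a
 * single hardware TCAM region with a fixed flexible-key layout; a chunk
 * gathers all entries of one priority within a region; an entry is one
 * TCAM rule occupying a parman-managed slot.
 */
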
struct mlxsw_sp_acl_tcam_region {
	struct list_head list; /* Member of a TCAM group */
	struct list_head chunk_list; /* List of chunks under this region */
	struct parman *parman;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_acl_tcam_group *group;
	u16 id; /* ACL ID and region ID - they are the same */
	char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
	struct mlxsw_afk_key_info *key_info;
	struct {
		struct parman_prio parman_prio;
		struct parman_item parman_item;
		struct mlxsw_sp_acl_rule_info *rulei;
	} catchall;
};

struct mlxsw_sp_acl_tcam_chunk {
	struct list_head list; /* Member of a TCAM region */
	struct rhash_head ht_node; /* Member of a chunk HT */
	unsigned int priority; /* Priority within the region and group */
	struct parman_prio parman_prio;
	struct mlxsw_sp_acl_tcam_group *group;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_entry {
	struct parman_item parman_item;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
};

static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
	.automatic_shrinking = true,
};

static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list)
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}

static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group,
			    const struct mlxsw_sp_acl_tcam_pattern *patterns,
			    unsigned int patterns_count)
{
	int err;

	group->tcam = tcam;
	group->patterns = patterns;
	group->patterns_count = patterns_count;
	INIT_LIST_HEAD(&group->region_list);
	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;

	err = rhashtable_init(&group->chunk_ht,
			      &mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
err_group_update:
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	return err;
}

static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	rhashtable_destroy(&group->chunk_ht);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
}

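/* Binding to a port is done per group, not per region, using the PPBT
 * register. A group is attached either to the port's ingress or egress
 * ACL lookup, which is why callers only ever see the group ID.
 */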
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct net_device *dev, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	if (!mlxsw_sp_port_dev_check(dev))
		return -EINVAL;

	mlxsw_sp_port = netdev_priv(dev);
	group->bound.local_port = mlxsw_sp_port->local_port;
	group->bound.ingress = ingress;
	mlxsw_reg_ppbt_pack(ppbt_pl,
			    group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
						   MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, group->bound.local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl,
			    group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
						   MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}

static unsigned int
mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	if (list_empty(&region->chunk_list))
		return 0;
	/* As a priority of a region, return priority of the first chunk */
	chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list);
	return chunk->priority;
}

static unsigned int
mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	if (list_empty(&region->chunk_list))
		return 0;
	chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list);
	return chunk->priority;
}

static void
mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_region *region2;
	struct list_head *pos;

	/* Position the region inside the list according to priority */
	list_for_each(pos, &group->region_list) {
		region2 = list_entry(pos, typeof(*region2), list);
		if (mlxsw_sp_acl_tcam_region_prio(region2) >
		    mlxsw_sp_acl_tcam_region_prio(region))
			break;
	}
	list_add_tail(&region->list, pos);
	group->region_count++;
}

static void
mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	group->region_count--;
	list_del(&region->list);
}

static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	int err;

	if (group->region_count == group->tcam->max_group_size)
		return -ENOBUFS;

	mlxsw_sp_acl_tcam_group_list_add(group, region);

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;
	region->group = group;

	return 0;

err_group_update:
	mlxsw_sp_acl_tcam_group_list_del(group, region);
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	return err;
}

static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->group;

	mlxsw_sp_acl_tcam_group_list_del(group, region);
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
}

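/* Find an existing region that can take a chunk with the given priority
 * and element usage. Regions are kept sorted by priority, so the walk
 * stops at the first region whose priority span covers the request;
 * *p_need_split then tells whether its key layout is insufficient.
 */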
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
				    unsigned int priority,
				    struct mlxsw_afk_element_usage *elusage,
				    bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_region *region, *region2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &group->region_list) {
		region = list_entry(pos, typeof(*region), list);

		/* First, check whether the requested priority rather belongs
		 * under one of the following regions.
		 */
		if (pos->next != &group->region_list) { /* not last */
			region2 = list_entry(pos->next, typeof(*region2), list);
			if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);

		/* If the requested element usage would not fit and the
		 * priority is lower than the currently inspected region,
		 * we cannot use this region, so return NULL to indicate
		 * that a new region has to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_region_prio(region))
			return NULL;

		/* If the requested element usage would not fit and the
		 * priority is higher than the currently inspected region,
		 * we cannot use this region either. There is still some
		 * hope that the next region might fit, so let it be
		 * processed and eventually break at the check right
		 * above this one.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_region_max_prio(region))
			continue;

		/* Indicate if the region needs to be split in order to add
		 * the requested priority. Split is needed when the requested
		 * element usage won't fit into the found region.
		 */
		*p_need_split = !issubset;
		return region;
	}
	return NULL; /* New region has to be created. */
}

static void
mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
				     struct mlxsw_afk_element_usage *elusage,
				     struct mlxsw_afk_element_usage *out)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

	for (i = 0; i < group->patterns_count; i++) {
		pattern = &group->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	memcpy(out, elusage, sizeof(*out));
}

#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16

static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

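/* Resizing is driven by parman (see the callbacks further below): when
 * a region runs out of free slots, the hardware region is grown in
 * place via PTAR in MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP increments.
 */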
static int
mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region,
				u16 new_size)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
			    new_size, region->id, region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static int
mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int offset,
				      struct mlxsw_sp_acl_rule_info *rulei)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
	char *act_set;
	char *mask;
	char *key;

	mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
			     region->tcam_region_info, offset);
	key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
	mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
	mlxsw_afk_encode(region->key_info, &rulei->values, key, mask);

	/* Only the first action set belongs here, the rest is in KVD */
	act_set = mlxsw_afa_block_first_set(rulei->act_block);
	mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}

static void
mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int offset)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];

	mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
			     region->tcam_region_info, offset);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}

static int
mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_acl_tcam_region *region,
					    unsigned int offset,
					    bool *activity)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
	int err;

	mlxsw_reg_ptce2_pack(ptce2_pl, true,
			     MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
			     region->tcam_region_info, offset);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
	if (err)
		return err;
	*activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
	return 0;
}

#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)

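/* Every region ends with one catch-all entry, kept at the highest
 * possible priority value and thus the lowest precedence. Its only
 * action is "continue", so a packet that matches nothing else in the
 * region still hits this entry and proceeds to the following lookups.
 */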
static int
mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct parman_prio *parman_prio = &region->catchall.parman_prio;
	struct parman_item *parman_item = &region->catchall.parman_item;
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	parman_prio_init(region->parman, parman_prio,
			 MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
	err = parman_item_add(region->parman, parman_prio, parman_item);
	if (err)
		goto err_parman_item_add;

	rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
	if (IS_ERR(rulei)) {
		err = PTR_ERR(rulei);
		goto err_rulei_create;
	}

	err = mlxsw_sp_acl_rulei_act_continue(rulei);
	if (WARN_ON(err))
		goto err_rulei_act_continue;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
						    parman_item->index, rulei);
	region->catchall.rulei = rulei;
	if (err)
		goto err_rule_insert;

	return 0;

err_rule_insert:
err_rulei_commit:
err_rulei_act_continue:
	mlxsw_sp_acl_rulei_destroy(rulei);
err_rulei_create:
	parman_item_remove(region->parman, parman_prio, parman_item);
err_parman_item_add:
	parman_prio_fini(parman_prio);
	return err;
}

static void
mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct parman_prio *parman_prio = &region->catchall.parman_prio;
	struct parman_item *parman_item = &region->catchall.parman_item;
	struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;

	mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
					      parman_item->index);
	mlxsw_sp_acl_rulei_destroy(rulei);
	parman_item_remove(region->parman, parman_prio, parman_item);
	parman_prio_fini(parman_prio);
}

static void
mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region,
			      u16 src_offset, u16 dst_offset, u16 size)
{
	char prcr_pl[MLXSW_REG_PRCR_LEN];

	mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
			    region->tcam_region_info, src_offset,
			    region->tcam_region_info, dst_offset, size);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
}

static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv,
						  unsigned long new_count)
{
	struct mlxsw_sp_acl_tcam_region *region = priv;
	struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
	u64 max_tcam_rules;

	max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
	if (new_count > max_tcam_rules)
		return -EINVAL;
	return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count);
}

static void mlxsw_sp_acl_tcam_region_parman_move(void *priv,
						 unsigned long from_index,
						 unsigned long to_index,
						 unsigned long count)
{
	struct mlxsw_sp_acl_tcam_region *region = priv;
	struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;

	mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region,
				      from_index, to_index, count);
}

static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = {
	.base_count = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
	.resize_step = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
	.resize = mlxsw_sp_acl_tcam_region_parman_resize,
	.move = mlxsw_sp_acl_tcam_region_parman_move,
	.algo = PARMAN_ALGO_TYPE_LSORT,
};

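/* Slot placement inside a region is delegated to parman (the priority
 * array manager, lib/parman.c). The LSORT algorithm keeps items sorted
 * by priority; the callbacks above grow the hardware region via PTAR
 * and relocate entries via PRCR whenever parman reshuffles the array.
 */
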
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&region->chunk_list);
	region->mlxsw_sp = mlxsw_sp;

	region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops,
				       region);
	if (!region->parman) {
		err = -ENOMEM;
		goto err_parman_create;
	}

	region->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(region->key_info)) {
		err = PTR_ERR(region->key_info);
		goto err_key_info_get;
	}

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_catchall_add;

	return region;

err_tcam_region_catchall_add:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	mlxsw_afk_key_info_put(region->key_info);
err_key_info_get:
	parman_destroy(region->parman);
err_parman_create:
	kfree(region);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
	mlxsw_afk_key_info_put(region->key_info);
	parman_destroy(region->parman);
	kfree(region);
}

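/* A chunk is associated with the first region whose priority span
 * covers it and whose key layout is a superset of the needed elements.
 * If no such region exists, a new one is created, keyed by the first
 * predefined pattern covering the requested elements so that later
 * chunks are likely to fit into it as well.
 */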
static int
mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_group *group,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage,
			      struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_region *region;
	bool region_created = false;
	bool need_split;
	int err;

	region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
						     &need_split);
	if (region && need_split) {
		/* According to priority, the chunk should belong to an
		 * existing region. However, this chunk needs elements
		 * that region does not contain. We need to split the existing
		 * region into two and create a new region for this chunk
		 * in between. This is not supported now.
		 */
		return -EOPNOTSUPP;
	}
	if (!region) {
		struct mlxsw_afk_element_usage region_elusage;

		mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
						     &region_elusage);
		region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
							 &region_elusage);
		if (IS_ERR(region))
			return PTR_ERR(region);
		region_created = true;
	}

	chunk->region = region;
	list_add_tail(&chunk->list, &region->chunk_list);

	if (!region_created)
		return 0;

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
	if (err)
		goto err_group_region_attach;

	return 0;

err_group_region_attach:
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
	return err;
}

static void
mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	list_del(&chunk->list);
	if (list_empty(&region->chunk_list)) {
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
	}
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       unsigned int priority,
			       struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->priority = priority;
	chunk->group = group;
	chunk->ref_count = 1;

	err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
					    elusage, chunk);
	if (err)
		goto err_chunk_assoc;

	parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority);

	err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
				     mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return chunk;

err_rhashtable_insert:
	parman_prio_fini(&chunk->parman_prio);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
err_chunk_assoc:
	kfree(chunk);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_group *group = chunk->group;

	rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
			       mlxsw_sp_acl_tcam_chunk_ht_params);
	parman_prio_fini(&chunk->parman_prio);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
	kfree(chunk);
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam_group *group,
			    unsigned int priority,
			    struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
				       mlxsw_sp_acl_tcam_chunk_ht_params);
	if (chunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
						       elusage)))
			return ERR_PTR(-EINVAL);
		chunk->ref_count++;
		return chunk;
	}
	return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
					      priority, elusage);
}

static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	if (--chunk->ref_count)
		return;
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
}

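/* Adding an entry is a three-step operation: take a reference on the
 * chunk matching the rule's priority, have parman assign a free slot
 * within that chunk's priority band, and program the key, mask and
 * first action set into that slot via PTCE2.
 */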
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_group *group,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
					    &rulei->values.elusage);
	if (IS_ERR(chunk))
		return PTR_ERR(chunk);

	region = chunk->region;
	err = parman_item_add(region->parman, &chunk->parman_prio,
			      &entry->parman_item);
	if (err)
		goto err_parman_item_add;

	err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
						    entry->parman_item.index,
						    rulei);
	if (err)
		goto err_rule_insert;
	entry->chunk = chunk;

	return 0;

err_rule_insert:
	parman_item_remove(region->parman, &chunk->parman_prio,
			   &entry->parman_item);
err_parman_item_add:
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
	return err;
}

static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_entry *entry)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
					      entry->parman_item.index);
	parman_item_remove(region->parman, &chunk->parman_prio,
			   &entry->parman_item);
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}

static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_entry *entry,
				     bool *activity)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
	struct mlxsw_sp_acl_tcam_region *region = chunk->region;

	return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
							   entry->parman_item.index,
							   activity);
}

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC,
	MLXSW_AFK_ELEMENT_SMAC,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP4,
	MLXSW_AFK_ELEMENT_DST_IP4,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
};

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP6_HI,
	MLXSW_AFK_ELEMENT_SRC_IP6_LO,
	MLXSW_AFK_ELEMENT_DST_IP6_HI,
	MLXSW_AFK_ELEMENT_DST_IP6_LO,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};

static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};

#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)

struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_group group;
};

struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_entry entry;
};

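/* Glue between the generic group/entry code above and the flower
 * classifier offload: a flower ruleset is simply a TCAM group and a
 * flower rule a TCAM entry, so each callback below is a thin wrapper.
 */
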
static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				     void *priv, void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam *tcam = priv;

	return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
					   mlxsw_sp_acl_tcam_patterns,
					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
				     void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
}

static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				      void *ruleset_priv,
				      struct net_device *dev, bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
					    dev, ingress);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
}

static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
}

static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
				  void *ruleset_priv, void *rule_priv,
				  struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
					   &rule->entry, rulei);
}

static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
}

static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
					   void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
						    activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
	.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};

static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
};

static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
			      enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;

	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
		return NULL;
	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
	if (WARN_ON(!ops))
		return NULL;
	return ops;
}

const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = {
	.priv_size = sizeof(struct mlxsw_sp_acl_tcam),
	.init = mlxsw_sp_acl_tcam_init,
	.fini = mlxsw_sp_acl_tcam_fini,
	.profile_ops = mlxsw_sp_acl_tcam_profile_ops,
};