// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver VCAP implementation
 *
 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
 *
 * The Sparx5 Chip Register Model can be browsed at this location:
 * https://github.com/microchip-ung/sparx-5_reginfo
 */

#include "vcap_api_debugfs.h"
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"
#include "sparx5_vcap_ag_api.h"
#include "sparx5_vcap_debugfs.h"

#define SUPER_VCAP_BLK_SIZE 3072 /* addresses per Super VCAP block */
#define STREAMSIZE (64 * 4) /* bytes in the VCAP cache area */

#define SPARX5_IS2_LOOKUPS 4
#define VCAP_IS2_KEYSEL(_ena, _noneth, _v4_mc, _v4_uc, _v6_mc, _v6_uc, _arp) \
	(ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(_ena) | \
	 ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_SET(_noneth) | \
	 ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_SET(_v4_mc) | \
	 ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_SET(_v4_uc) | \
	 ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_SET(_v6_mc) | \
	 ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(_v6_uc) | \
	 ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(_arp))

static struct sparx5_vcap_inst {
	enum vcap_type vtype;  /* type of vcap */
	int vinst; /* instance number within the same type */
	int lookups; /* number of lookups in this vcap type */
	int lookups_per_instance; /* number of lookups in this instance */
	int first_cid; /* first chain id in this vcap */
	int last_cid; /* last chain id in this vcap */
	int count; /* number of available addresses, not in super vcap */
	int map_id; /* id in the super vcap block mapping (if applicable) */
	int blockno; /* starting block in super vcap (if applicable) */
	int blocks; /* number of blocks in super vcap (if applicable) */
} sparx5_vcap_inst_cfg[] = {
	{
		.vtype = VCAP_TYPE_IS2, /* IS2-0 */
		.vinst = 0,
		.map_id = 4,
		.lookups = SPARX5_IS2_LOOKUPS,
		.lookups_per_instance = SPARX5_IS2_LOOKUPS / 2,
		.first_cid = SPARX5_VCAP_CID_IS2_L0,
		.last_cid = SPARX5_VCAP_CID_IS2_L2 - 1,
		.blockno = 0, /* Maps block 0-1 */
		.blocks = 2,
	},
	{
		.vtype = VCAP_TYPE_IS2, /* IS2-1 */
		.vinst = 1,
		.map_id = 5,
		.lookups = SPARX5_IS2_LOOKUPS,
		.lookups_per_instance = SPARX5_IS2_LOOKUPS / 2,
		.first_cid = SPARX5_VCAP_CID_IS2_L2,
		.last_cid = SPARX5_VCAP_CID_IS2_MAX,
		.blockno = 2, /* Maps block 2-3 */
		.blocks = 2,
	},
};

/* Await the super VCAP completion of the current operation */
static void sparx5_vcap_wait_super_update(struct sparx5 *sparx5)
{
	u32 value;

	read_poll_timeout(spx5_rd, value,
			  !VCAP_SUPER_CTRL_UPDATE_SHOT_GET(value), 500, 10000,
			  false, sparx5, VCAP_SUPER_CTRL);
}

/* Initializing a VCAP address range: only IS2 for now */
static void _sparx5_vcap_range_init(struct sparx5 *sparx5,
				    struct vcap_admin *admin,
				    u32 addr, u32 count)
{
	u32 size = count - 1;

	spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) |
		VCAP_SUPER_CFG_MV_SIZE_SET(size),
		sparx5, VCAP_SUPER_CFG);
	spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
		VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(0) |
		VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(0) |
		VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(0) |
		VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
		VCAP_SUPER_CTRL_CLEAR_CACHE_SET(true) |
		VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
		sparx5, VCAP_SUPER_CTRL);
	sparx5_vcap_wait_super_update(sparx5);
}
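
/* The INITIALIZE command above covers entries, actions and counters alike,
 * since none of the UPDATE_*_DIS bits are set, and the cache is cleared in
 * the same operation (CLEAR_CACHE).
 */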

/* Initializing VCAP rule data area */
static void sparx5_vcap_block_init(struct sparx5 *sparx5,
				   struct vcap_admin *admin)
{
	_sparx5_vcap_range_init(sparx5, admin, admin->first_valid_addr,
				admin->last_valid_addr -
				admin->first_valid_addr);
}

/* Get the keyset name from the sparx5 VCAP model */
static const char *sparx5_vcap_keyset_name(struct net_device *ndev,
					   enum vcap_keyfield_set keyset)
{
	struct sparx5_port *port = netdev_priv(ndev);

	return vcap_keyset_name(port->sparx5->vcap_ctrl, keyset);
}

/* Check if this is the first lookup of IS2 */
static bool sparx5_vcap_is2_is_first_chain(struct vcap_rule *rule)
{
	return (rule->vcap_chain_id >= SPARX5_VCAP_CID_IS2_L0 &&
		rule->vcap_chain_id < SPARX5_VCAP_CID_IS2_L1) ||
	       ((rule->vcap_chain_id >= SPARX5_VCAP_CID_IS2_L2 &&
		 rule->vcap_chain_id < SPARX5_VCAP_CID_IS2_L3));
}

/* Set the narrow range ingress port mask on a rule */
static void sparx5_vcap_add_range_port_mask(struct vcap_rule *rule,
					    struct net_device *ndev)
{
	struct sparx5_port *port = netdev_priv(ndev);
	u32 port_mask;
	u32 range;

	range = port->portno / BITS_PER_TYPE(u32);
	/* Port bit set to match-any */
	port_mask = ~BIT(port->portno % BITS_PER_TYPE(u32));
	vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK_SEL, 0, 0xf);
	vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK_RNG, range, 0xf);
	vcap_rule_add_key_u32(rule, VCAP_KF_IF_IGR_PORT_MASK, 0, port_mask);
}

/* Set the wide range ingress port mask on a rule */
static void sparx5_vcap_add_wide_port_mask(struct vcap_rule *rule,
					   struct net_device *ndev)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_u72_key port_mask;
	u32 range;

	/* Port bit set to match-any */
	memset(port_mask.value, 0, sizeof(port_mask.value));
	memset(port_mask.mask, 0xff, sizeof(port_mask.mask));
	range = port->portno / BITS_PER_BYTE;
	port_mask.mask[range] = ~BIT(port->portno % BITS_PER_BYTE);
	vcap_rule_add_key_u72(rule, VCAP_KF_IF_IGR_PORT_MASK, &port_mask);
}

/* Convert chain id to vcap lookup id */
static int sparx5_vcap_cid_to_lookup(int cid)
{
	int lookup = 0;

	/* For now only handle IS2 */
	if (cid >= SPARX5_VCAP_CID_IS2_L1 && cid < SPARX5_VCAP_CID_IS2_L2)
		lookup = 1;
	else if (cid >= SPARX5_VCAP_CID_IS2_L2 && cid < SPARX5_VCAP_CID_IS2_L3)
		lookup = 2;
	else if (cid >= SPARX5_VCAP_CID_IS2_L3 && cid < SPARX5_VCAP_CID_IS2_MAX)
		lookup = 3;

	return lookup;
}

/* Return the list of keysets for the vcap port configuration */
static int sparx5_vcap_is2_get_port_keysets(struct net_device *ndev,
					    int lookup,
					    struct vcap_keyset_list *keysetlist,
					    u16 l3_proto)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5 *sparx5 = port->sparx5;
	int portno = port->portno;
	u32 value;

	/* Check if the port keyset selection is enabled */
	value = spx5_rd(sparx5, ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
	if (!ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_GET(value))
		return -ENOENT;

	/* Collect all keysets for the port in a list */
	if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_ARP) {
		switch (ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_GET(value)) {
		case VCAP_IS2_PS_ARP_MAC_ETYPE:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
			break;
		case VCAP_IS2_PS_ARP_ARP:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_ARP);
			break;
		}
	}

	if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP) {
		switch (ANA_ACL_VCAP_S2_KEY_SEL_IP4_UC_KEY_SEL_GET(value)) {
		case VCAP_IS2_PS_IPV4_UC_MAC_ETYPE:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
			break;
		case VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
			vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
			break;
		case VCAP_IS2_PS_IPV4_UC_IP_7TUPLE:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
			break;
		}

		switch (ANA_ACL_VCAP_S2_KEY_SEL_IP4_MC_KEY_SEL_GET(value)) {
		case VCAP_IS2_PS_IPV4_MC_MAC_ETYPE:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
			break;
		case VCAP_IS2_PS_IPV4_MC_IP4_TCP_UDP_OTHER:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
			vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
			break;
		case VCAP_IS2_PS_IPV4_MC_IP_7TUPLE:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
			break;
		}
	}

	if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6) {
		switch (ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_GET(value)) {
		case VCAP_IS2_PS_IPV6_UC_MAC_ETYPE:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
			break;
		case VCAP_IS2_PS_IPV6_UC_IP_7TUPLE:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
			break;
		case VCAP_IS2_PS_IPV6_UC_IP6_STD:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
			break;
		case VCAP_IS2_PS_IPV6_UC_IP4_TCP_UDP_OTHER:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
			vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
			break;
		}

		switch (ANA_ACL_VCAP_S2_KEY_SEL_IP6_MC_KEY_SEL_GET(value)) {
		case VCAP_IS2_PS_IPV6_MC_MAC_ETYPE:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
			break;
		case VCAP_IS2_PS_IPV6_MC_IP_7TUPLE:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
			break;
		case VCAP_IS2_PS_IPV6_MC_IP6_STD:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
			break;
		case VCAP_IS2_PS_IPV6_MC_IP4_TCP_UDP_OTHER:
			vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
			vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
			break;
		case VCAP_IS2_PS_IPV6_MC_IP6_VID:
			/* Not used */
			break;
		}
	}

	if (l3_proto != ETH_P_ARP && l3_proto != ETH_P_IP &&
	    l3_proto != ETH_P_IPV6) {
		switch (ANA_ACL_VCAP_S2_KEY_SEL_NON_ETH_KEY_SEL_GET(value)) {
		case VCAP_IS2_PS_NONETH_MAC_ETYPE:
			/* IS2 non-classified frames generate MAC_ETYPE */
			vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
			break;
		}
	}
	return 0;
}

/* Get the port keyset for the vcap lookup */
int sparx5_vcap_get_port_keyset(struct net_device *ndev,
				struct vcap_admin *admin,
				int cid,
				u16 l3_proto,
				struct vcap_keyset_list *kslist)
{
	int lookup;

	lookup = sparx5_vcap_cid_to_lookup(cid);
	return sparx5_vcap_is2_get_port_keysets(ndev, lookup, kslist, l3_proto);
}

/* API callback used for validating a field keyset (check the port keysets) */
static enum vcap_keyfield_set
sparx5_vcap_validate_keyset(struct net_device *ndev,
			    struct vcap_admin *admin,
			    struct vcap_rule *rule,
			    struct vcap_keyset_list *kslist,
			    u16 l3_proto)
{
	struct vcap_keyset_list keysetlist = {};
	enum vcap_keyfield_set keysets[10] = {};
	int idx, jdx, lookup;

	if (!kslist || kslist->cnt == 0)
		return VCAP_KFS_NO_VALUE;

	/* Get a list of currently configured keysets in the lookups */
	lookup = sparx5_vcap_cid_to_lookup(rule->vcap_chain_id);
	keysetlist.max = ARRAY_SIZE(keysets);
	keysetlist.keysets = keysets;
	sparx5_vcap_is2_get_port_keysets(ndev, lookup, &keysetlist, l3_proto);

	/* Check if there is a match and return the match */
	for (idx = 0; idx < kslist->cnt; ++idx)
		for (jdx = 0; jdx < keysetlist.cnt; ++jdx)
			if (kslist->keysets[idx] == keysets[jdx])
				return kslist->keysets[idx];

	pr_err("%s:%d: %s not supported in port key selection\n",
	       __func__, __LINE__,
	       sparx5_vcap_keyset_name(ndev, kslist->keysets[0]));

	return -ENOENT;
}

/* API callback used for adding default fields to a rule */
static void sparx5_vcap_add_default_fields(struct net_device *ndev,
					   struct vcap_admin *admin,
					   struct vcap_rule *rule)
{
	const struct vcap_field *field;

	field = vcap_lookup_keyfield(rule, VCAP_KF_IF_IGR_PORT_MASK);
	if (field && field->width == SPX5_PORTS)
		sparx5_vcap_add_wide_port_mask(rule, ndev);
	else if (field && field->width == BITS_PER_TYPE(u32))
		sparx5_vcap_add_range_port_mask(rule, ndev);
	else
		pr_err("%s:%d: %s: could not add an ingress port mask for: %s\n",
		       __func__, __LINE__, netdev_name(ndev),
		       sparx5_vcap_keyset_name(ndev, rule->keyset));
	/* add the lookup bit */
	if (sparx5_vcap_is2_is_first_chain(rule))
		vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
	else
		vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_0);
}

/* API callback used for erasing the vcap cache area (not the register area) */
static void sparx5_vcap_cache_erase(struct vcap_admin *admin)
{
	memset(admin->cache.keystream, 0, STREAMSIZE);
	memset(admin->cache.maskstream, 0, STREAMSIZE);
	memset(admin->cache.actionstream, 0, STREAMSIZE);
	memset(&admin->cache.counter, 0, sizeof(admin->cache.counter));
}

/* API callback used for writing to the VCAP cache */
static void sparx5_vcap_cache_write(struct net_device *ndev,
				    struct vcap_admin *admin,
				    enum vcap_selection sel,
				    u32 start,
				    u32 count)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5 *sparx5 = port->sparx5;
	u32 *keystr, *mskstr, *actstr;
	int idx;

	keystr = &admin->cache.keystream[start];
	mskstr = &admin->cache.maskstream[start];
	actstr = &admin->cache.actionstream[start];
	switch (sel) {
	case VCAP_SEL_ENTRY:
		for (idx = 0; idx < count; ++idx) {
			/* Avoid 'match-off' by setting value & mask */
			spx5_wr(keystr[idx] & mskstr[idx], sparx5,
				VCAP_SUPER_VCAP_ENTRY_DAT(idx));
			spx5_wr(~mskstr[idx], sparx5,
				VCAP_SUPER_VCAP_MASK_DAT(idx));
		}
		break;
	case VCAP_SEL_ACTION:
		for (idx = 0; idx < count; ++idx)
			spx5_wr(actstr[idx], sparx5,
				VCAP_SUPER_VCAP_ACTION_DAT(idx));
		break;
	case VCAP_SEL_ALL:
		pr_err("%s:%d: cannot write all streams at once\n",
		       __func__, __LINE__);
		break;
	default:
		break;
	}
	if (sel & VCAP_SEL_COUNTER) {
		start = start & 0xfff; /* counter limit */
		if (admin->vinst == 0)
			spx5_wr(admin->cache.counter, sparx5,
				ANA_ACL_CNT_A(start));
		else
			spx5_wr(admin->cache.counter, sparx5,
				ANA_ACL_CNT_B(start));
		spx5_wr(admin->cache.sticky, sparx5,
			VCAP_SUPER_VCAP_CNT_DAT(0));
	}
}
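
/* Note that the key mask is stored inverted in hardware: the write callback
 * above negates the maskstream words before writing them, and the read
 * callback below negates them again when filling the cache.
 */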

/* API callback used for reading from the VCAP into the VCAP cache */
static void sparx5_vcap_cache_read(struct net_device *ndev,
				   struct vcap_admin *admin,
				   enum vcap_selection sel,
				   u32 start,
				   u32 count)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5 *sparx5 = port->sparx5;
	u32 *keystr, *mskstr, *actstr;
	int idx;

	keystr = &admin->cache.keystream[start];
	mskstr = &admin->cache.maskstream[start];
	actstr = &admin->cache.actionstream[start];
	if (sel & VCAP_SEL_ENTRY) {
		for (idx = 0; idx < count; ++idx) {
			keystr[idx] = spx5_rd(sparx5,
					      VCAP_SUPER_VCAP_ENTRY_DAT(idx));
			mskstr[idx] = ~spx5_rd(sparx5,
					       VCAP_SUPER_VCAP_MASK_DAT(idx));
		}
	}
	if (sel & VCAP_SEL_ACTION) {
		for (idx = 0; idx < count; ++idx)
			actstr[idx] = spx5_rd(sparx5,
					      VCAP_SUPER_VCAP_ACTION_DAT(idx));
	}
	if (sel & VCAP_SEL_COUNTER) {
		start = start & 0xfff; /* counter limit */
		if (admin->vinst == 0)
			admin->cache.counter =
				spx5_rd(sparx5, ANA_ACL_CNT_A(start));
		else
			admin->cache.counter =
				spx5_rd(sparx5, ANA_ACL_CNT_B(start));
		admin->cache.sticky =
			spx5_rd(sparx5, VCAP_SUPER_VCAP_CNT_DAT(0));
	}
}

/* API callback used for initializing a VCAP address range */
static void sparx5_vcap_range_init(struct net_device *ndev,
				   struct vcap_admin *admin, u32 addr,
				   u32 count)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5 *sparx5 = port->sparx5;

	_sparx5_vcap_range_init(sparx5, admin, addr, count);
}

/* API callback used for updating the VCAP cache */
static void sparx5_vcap_update(struct net_device *ndev,
			       struct vcap_admin *admin, enum vcap_command cmd,
			       enum vcap_selection sel, u32 addr)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5 *sparx5 = port->sparx5;
	bool clear;

	clear = (cmd == VCAP_CMD_INITIALIZE);
	spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) |
		VCAP_SUPER_CFG_MV_SIZE_SET(0), sparx5, VCAP_SUPER_CFG);
	spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(cmd) |
		VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET((VCAP_SEL_ENTRY & sel) == 0) |
		VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET((VCAP_SEL_ACTION & sel) == 0) |
		VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET((VCAP_SEL_COUNTER & sel) == 0) |
		VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
		VCAP_SUPER_CTRL_CLEAR_CACHE_SET(clear) |
		VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
		sparx5, VCAP_SUPER_CTRL);
	sparx5_vcap_wait_super_update(sparx5);
}

/* API callback used for moving a block of rules in the VCAP */
static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin,
			     u32 addr, int offset, int count)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5 *sparx5 = port->sparx5;
	enum vcap_command cmd;
	u16 mv_num_pos;
	u16 mv_size;

	mv_size = count - 1;
	if (offset > 0) {
		mv_num_pos = offset - 1;
		cmd = VCAP_CMD_MOVE_DOWN;
	} else {
		mv_num_pos = -offset - 1;
		cmd = VCAP_CMD_MOVE_UP;
	}
	spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(mv_num_pos) |
		VCAP_SUPER_CFG_MV_SIZE_SET(mv_size),
		sparx5, VCAP_SUPER_CFG);
	spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(cmd) |
		VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(0) |
		VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(0) |
		VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(0) |
		VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
		VCAP_SUPER_CTRL_CLEAR_CACHE_SET(false) |
		VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
		sparx5, VCAP_SUPER_CTRL);
	sparx5_vcap_wait_super_update(sparx5);
}
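
/* A positive offset maps to a VCAP_CMD_MOVE_DOWN command and a negative
 * offset to a VCAP_CMD_MOVE_UP command; the hardware expects both the block
 * size (MV_SIZE) and the move distance (MV_NUM_POS) as the value minus one.
 */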

/* Enable all lookups in the VCAP instance */
static int sparx5_vcap_enable(struct net_device *ndev,
			      struct vcap_admin *admin,
			      bool enable)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5 *sparx5;
	int portno;

	sparx5 = port->sparx5;
	portno = port->portno;

	/* For now we only consider IS2 */
	if (enable)
		spx5_wr(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0xf), sparx5,
			ANA_ACL_VCAP_S2_CFG(portno));
	else
		spx5_wr(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0), sparx5,
			ANA_ACL_VCAP_S2_CFG(portno));
	return 0;
}

/* API callback operations: only IS2 is supported for now */
static struct vcap_operations sparx5_vcap_ops = {
	.validate_keyset = sparx5_vcap_validate_keyset,
	.add_default_fields = sparx5_vcap_add_default_fields,
	.cache_erase = sparx5_vcap_cache_erase,
	.cache_write = sparx5_vcap_cache_write,
	.cache_read = sparx5_vcap_cache_read,
	.init = sparx5_vcap_range_init,
	.update = sparx5_vcap_update,
	.move = sparx5_vcap_move,
	.port_info = sparx5_port_info,
	.enable = sparx5_vcap_enable,
};

/* Enable lookups per port and set the keyset generation: only IS2 for now */
static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5,
					   struct vcap_admin *admin)
{
	int portno, lookup;
	u32 keysel;

	/* Set the default keysets for all lookups on all ports: MAC_ETYPE
	 * for non-Ethernet frames, IP4_TCP_UDP/IP4_OTHER for IPv4,
	 * IP_7TUPLE for IPv6 and the ARP keyset for ARP frames
	 */
	keysel = VCAP_IS2_KEYSEL(true, VCAP_IS2_PS_NONETH_MAC_ETYPE,
				 VCAP_IS2_PS_IPV4_MC_IP4_TCP_UDP_OTHER,
				 VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER,
				 VCAP_IS2_PS_IPV6_MC_IP_7TUPLE,
				 VCAP_IS2_PS_IPV6_UC_IP_7TUPLE,
				 VCAP_IS2_PS_ARP_ARP);
	for (lookup = 0; lookup < admin->lookups; ++lookup) {
		for (portno = 0; portno < SPX5_PORTS; ++portno) {
			spx5_wr(keysel, sparx5,
				ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
		}
	}
}

/* Disable lookups per port: only IS2 for now */
static void sparx5_vcap_port_key_deselection(struct sparx5 *sparx5,
					     struct vcap_admin *admin)
{
	int portno;

	for (portno = 0; portno < SPX5_PORTS; ++portno)
		spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0),
			 ANA_ACL_VCAP_S2_CFG_SEC_ENA,
			 sparx5,
			 ANA_ACL_VCAP_S2_CFG(portno));
}

static void sparx5_vcap_admin_free(struct vcap_admin *admin)
{
	if (!admin)
		return;
	mutex_destroy(&admin->lock);
	kfree(admin->cache.keystream);
	kfree(admin->cache.maskstream);
	kfree(admin->cache.actionstream);
	kfree(admin);
}

/* Allocate a vcap instance with a rule list and a cache area */
static struct vcap_admin *
sparx5_vcap_admin_alloc(struct sparx5 *sparx5, struct vcap_control *ctrl,
			const struct sparx5_vcap_inst *cfg)
{
	struct vcap_admin *admin;

	admin = kzalloc(sizeof(*admin), GFP_KERNEL);
	if (!admin)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&admin->list);
	INIT_LIST_HEAD(&admin->rules);
	INIT_LIST_HEAD(&admin->enabled);
	mutex_init(&admin->lock);
	admin->vtype = cfg->vtype;
	admin->vinst = cfg->vinst;
	admin->lookups = cfg->lookups;
	admin->lookups_per_instance = cfg->lookups_per_instance;
	admin->first_cid = cfg->first_cid;
	admin->last_cid = cfg->last_cid;
	admin->cache.keystream = kzalloc(STREAMSIZE, GFP_KERNEL);
	admin->cache.maskstream = kzalloc(STREAMSIZE, GFP_KERNEL);
	admin->cache.actionstream = kzalloc(STREAMSIZE, GFP_KERNEL);
	if (!admin->cache.keystream || !admin->cache.maskstream ||
	    !admin->cache.actionstream) {
		sparx5_vcap_admin_free(admin);
		return ERR_PTR(-ENOMEM);
	}
	return admin;
}
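
/* The admin instance owns addresses first_valid_addr through last_valid_addr;
 * last_used_addr is initialized below to the end of that range, before any
 * rules have been written.
 */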

/* Do block allocations and provide addresses for VCAP instances */
static void sparx5_vcap_block_alloc(struct sparx5 *sparx5,
				    struct vcap_admin *admin,
				    const struct sparx5_vcap_inst *cfg)
{
	int idx;

	/* Super VCAP block mapping and address configuration. Block 0
	 * is assigned addresses 0 through 3071, block 1 is assigned
	 * addresses 3072 through 6143, and so on.
	 */
	for (idx = cfg->blockno; idx < cfg->blockno + cfg->blocks; ++idx) {
		spx5_wr(VCAP_SUPER_IDX_CORE_IDX_SET(idx), sparx5,
			VCAP_SUPER_IDX);
		spx5_wr(VCAP_SUPER_MAP_CORE_MAP_SET(cfg->map_id), sparx5,
			VCAP_SUPER_MAP);
	}
	admin->first_valid_addr = cfg->blockno * SUPER_VCAP_BLK_SIZE;
	admin->last_used_addr = admin->first_valid_addr +
				cfg->blocks * SUPER_VCAP_BLK_SIZE;
	admin->last_valid_addr = admin->last_used_addr - 1;
}

/* Allocate a vcap control and vcap instances and configure the system */
int sparx5_vcap_init(struct sparx5 *sparx5)
{
	const struct sparx5_vcap_inst *cfg;
	struct vcap_control *ctrl;
	struct vcap_admin *admin;
	struct dentry *dir;
	int err = 0, idx;

	/* Create a VCAP control instance that owns the platform specific VCAP
	 * model with VCAP instances and information about keysets, keys,
	 * actionsets and actions
	 * - Create administrative state for each available VCAP
	 *   - Lists of rules
	 *   - Address information
	 * - Initialize VCAP blocks
	 * - Configure port keysets
	 */
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	sparx5->vcap_ctrl = ctrl;
	/* select the sparx5 VCAP model */
	ctrl->vcaps = sparx5_vcaps;
	ctrl->stats = &sparx5_vcap_stats;
	/* Setup callbacks to allow the API to use the VCAP HW */
	ctrl->ops = &sparx5_vcap_ops;

	INIT_LIST_HEAD(&ctrl->list);
	for (idx = 0; idx < ARRAY_SIZE(sparx5_vcap_inst_cfg); ++idx) {
		cfg = &sparx5_vcap_inst_cfg[idx];
		admin = sparx5_vcap_admin_alloc(sparx5, ctrl, cfg);
		if (IS_ERR(admin)) {
			err = PTR_ERR(admin);
			pr_err("%s:%d: vcap allocation failed: %d\n",
			       __func__, __LINE__, err);
			return err;
		}
		sparx5_vcap_block_alloc(sparx5, admin, cfg);
		sparx5_vcap_block_init(sparx5, admin);
		if (cfg->vinst == 0)
			sparx5_vcap_port_key_selection(sparx5, admin);
		list_add_tail(&admin->list, &ctrl->list);
	}
	dir = vcap_debugfs(sparx5->dev, sparx5->debugfs_root, ctrl);
	for (idx = 0; idx < SPX5_PORTS; ++idx)
		if (sparx5->ports[idx])
			vcap_port_debugfs(sparx5->dev, dir, ctrl,
					  sparx5->ports[idx]->ndev);

	return err;
}

void sparx5_vcap_destroy(struct sparx5 *sparx5)
{
	struct vcap_control *ctrl = sparx5->vcap_ctrl;
	struct vcap_admin *admin, *admin_next;

	if (!ctrl)
		return;

	list_for_each_entry_safe(admin, admin_next, &ctrl->list, list) {
		sparx5_vcap_port_key_deselection(sparx5, admin);
		vcap_del_rules(ctrl, admin);
		list_del(&admin->list);
		sparx5_vcap_admin_free(admin);
	}
	kfree(ctrl);
}