// SPDX-License-Identifier: GPL-2.0+
/* Microchip VCAP API
 *
 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/tcp.h>

#include "sparx5_tc.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"

#define SPX5_MAX_RULE_SIZE 13 /* allows X1, X2, X4, X6 and X12 rules */

/* Collect keysets and type ids for multiple rules per size */
struct sparx5_wildcard_rule {
	bool selected;		/* a keyset of this rule size was chosen */
	u8 value;		/* combined (ANDed) keyset type id value */
	u8 mask;		/* combined (ORed) keyset type id mask */
	enum vcap_keyfield_set keyset;
};

struct sparx5_multiple_rules {
	struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
};

/* Running state shared by the per-dissector-key parse handlers */
struct sparx5_tc_flower_parse_usage {
	struct flow_cls_offload *fco;
	struct flow_rule *frule;
	struct vcap_rule *vrule;	/* rule being populated with keys */
	struct vcap_admin *admin;
	u16 l3_proto;		/* ethertype seen by the basic handler */
	u8 l4_proto;		/* IP protocol seen by the basic handler */
	unsigned int used_keys;	/* dissector keys consumed so far */
};

/* These protocols have dedicated keysets in IS2 and a TC dissector
 * ETH_P_ARP does not have a TC dissector
 */
static u16 sparx5_tc_known_etypes[] = {
	ETH_P_ALL,
	ETH_P_ARP,
	ETH_P_IP,
	ETH_P_IPV6,
};

/* Hardware encoding of the IS2 ARP opcode key */
enum sparx5_is2_arp_opcode {
	SPX5_IS2_ARP_REQUEST,
	SPX5_IS2_ARP_REPLY,
	SPX5_IS2_RARP_REQUEST,
	SPX5_IS2_RARP_REPLY,
};

/* Opcode values as delivered by the TC flower ARP dissector */
enum tc_arp_opcode {
	TC_ARP_OP_RESERVED,
	TC_ARP_OP_REQUEST,
	TC_ARP_OP_REPLY,
};

/* Return true if the ethertype has a dedicated keyset (see table above) */
static bool sparx5_tc_is_known_etype(u16 etype)
{
	int idx;

	/* For now this only knows about IS2 traffic classification */
	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_known_etypes); ++idx)
		if (sparx5_tc_known_etypes[idx] == etype)
			return true;

	return false;
}

/* Add L2 SMAC/DMAC keys for a non-zero-masked eth_addrs dissector match */
static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st)
{
	enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
	enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
	struct flow_match_eth_addrs match;
	struct vcap_u48_key smac, dmac;
	int err = 0;

	flow_rule_match_eth_addrs(st->frule, &match);

	if (!is_zero_ether_addr(match.mask->src)) {
		vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
		vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
		err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
		if (err)
			goto out;
	}

	if (!is_zero_ether_addr(match.mask->dst)) {
		vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
		vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
		err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
	return err;
}

/* Add IPv4 SIP/DIP keys; only applies when the filter matches ETH_P_IP */
static int
sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage *st)
{
	int err = 0;

	if (st->l3_proto == ETH_P_IP) {
		struct flow_match_ipv4_addrs mt;

		flow_rule_match_ipv4_addrs(st->frule, &mt);
		if (mt.mask->src) {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP4_SIP,
						    be32_to_cpu(mt.key->src),
						    be32_to_cpu(mt.mask->src));
			if (err)
				goto out;
		}
		if (mt.mask->dst) {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP4_DIP,
						    be32_to_cpu(mt.key->dst),
						    be32_to_cpu(mt.mask->dst));
			if (err)
				goto out;
		}
	}

	/* Marked used even when l3_proto is not IPv4 and no key was added */
	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
	return err;
}

/* Add IPv6 SIP/DIP keys; only applies when the filter matches ETH_P_IPV6 */
static int
sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st)
{
	int err = 0;

	if (st->l3_proto == ETH_P_IPV6) {
		struct flow_match_ipv6_addrs mt;
		struct vcap_u128_key sip;
		struct vcap_u128_key dip;

		flow_rule_match_ipv6_addrs(st->frule, &mt);
		/* Check if address masks are non-zero */
		if (!ipv6_addr_any(&mt.mask->src)) {
			vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
			vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
			err = vcap_rule_add_key_u128(st->vrule,
						     VCAP_KF_L3_IP6_SIP, &sip);
			if (err)
				goto out;
		}
		if (!ipv6_addr_any(&mt.mask->dst)) {
			vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
			vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
			err = vcap_rule_add_key_u128(st->vrule,
						     VCAP_KF_L3_IP6_DIP, &dip);
			if (err)
				goto out;
		}
	}
	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
	return err;
out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
	return err;
}

/* Translate TC fragment flags into the 2-bit L3_FRAGMENT_TYPE key:
 * 0 = not fragmented, 1 = initial fragment, 3 = follow-up fragment
 */
static int
sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_control mt;
	u32 value, mask;
	int err = 0;

	flow_rule_match_control(st->frule, &mt);

	if (mt.mask->flags) {
		if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
			if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
				value = 1; /* initial fragment */
				mask = 0x3;
			} else {
				if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
					value = 3; /* follow up fragment */
					mask = 0x3;
				} else {
					value = 0; /* no fragment */
					mask = 0x3;
				}
			}
		} else {
			if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
				value = 3; /* follow up fragment */
				mask = 0x3;
			} else {
				value = 0; /* no fragment */
				mask = 0x3;
			}
		}

		err = vcap_rule_add_key_u32(st->vrule,
					    VCAP_KF_L3_FRAGMENT_TYPE,
					    value, mask);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
	return err;
}

/* Add L4 source/destination port keys from a ports dissector match */
static int
sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_ports mt;
	u16 value, mask;
	int err = 0;

	flow_rule_match_ports(st->frule, &mt);

	if (mt.mask->src) {
		value = be16_to_cpu(mt.key->src);
		mask = be16_to_cpu(mt.mask->src);
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
					    mask);
		if (err)
			goto out;
	}

	if (mt.mask->dst) {
		value = be16_to_cpu(mt.key->dst);
		mask = be16_to_cpu(mt.mask->dst);
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
					    mask);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
	return err;
}

/* Record l3/l4 protocol from the basic match and add the corresponding
 * keys: ETYPE for unknown ethertypes, IP4_IS for IPv4/IPv6, TCP_IS (and
 * TCP_UDP_IS on IS0) for TCP/UDP, or a raw L3_IP_PROTO otherwise
 */
static int
sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_basic mt;
	int err = 0;

	flow_rule_match_basic(st->frule, &mt);

	if (mt.mask->n_proto) {
		st->l3_proto = be16_to_cpu(mt.key->n_proto);
		if (!sparx5_tc_is_known_etype(st->l3_proto)) {
			err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
						    st->l3_proto, ~0);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IP) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IPV6) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
		}
	}

	if (mt.mask->ip_proto) {
		st->l4_proto = mt.key->ip_proto;
		if (st->l4_proto == IPPROTO_TCP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l4_proto == IPPROTO_UDP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				err = vcap_rule_add_key_bit(st->vrule,
							    VCAP_KF_TCP_UDP_IS,
							    VCAP_BIT_1);
				if (err)
					goto out;
			}
		} else {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP_PROTO,
						    st->l4_proto, ~0);
			if (err)
				goto out;
		}
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
	return err;
}

/* Add classified VLAN id and PCP keys from a vlan dissector match */
static int
sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st)
{
	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
	struct flow_match_vlan mt;
	int err;

	flow_rule_match_vlan(st->frule, &mt);

	if (mt.mask->vlan_id) {
		err = vcap_rule_add_key_u32(st->vrule, vid_key,
					    mt.key->vlan_id,
					    mt.mask->vlan_id);
		if (err)
			goto out;
	}

	if (mt.mask->vlan_priority) {
		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
					    mt.key->vlan_priority,
					    mt.mask->vlan_priority);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);

	return 0;
out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
	return err;
}

/* Translate each masked TCP header flag into its single-bit VCAP key */
static int
sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_tcp mt;
	u16 tcp_flags_mask;
	u16 tcp_flags_key;
	enum vcap_bit val;
	int err = 0;

	flow_rule_match_tcp(st->frule, &mt);
	tcp_flags_key = be16_to_cpu(mt.key->flags);
	tcp_flags_mask = be16_to_cpu(mt.mask->flags);

	if (tcp_flags_mask & TCPHDR_FIN) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_FIN)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_SYN) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_SYN)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_RST) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_RST)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_PSH) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_PSH)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_ACK) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_ACK)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_URG) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_URG)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
	return err;
}

/* Map the TC ARP match onto the IS2 ARP keyset: opcode (ARP vs RARP is
 * decided by l3_proto), sender/target IPv4 addresses. Hardware addresses
 * are rejected since the keyset cannot match them.
 */
static int
sparx5_tc_flower_handler_arp_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_arp mt;
	u16 value, mask;
	u32 ipval, ipmsk;
	int err;

	flow_rule_match_arp(st->frule, &mt);

	if (mt.mask->op) {
		mask = 0x3; /* opcode key is two bits wide */
		if (st->l3_proto == ETH_P_ARP) {
			value = mt.key->op == TC_ARP_OP_REQUEST ?
					SPX5_IS2_ARP_REQUEST :
					SPX5_IS2_ARP_REPLY;
		} else { /* RARP */
			value = mt.key->op == TC_ARP_OP_REQUEST ?
					SPX5_IS2_RARP_REQUEST :
					SPX5_IS2_RARP_REPLY;
		}
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
					    value, mask);
		if (err)
			goto out;
	}

	/* The IS2 ARP keyset does not support ARP hardware addresses */
	if (!is_zero_ether_addr(mt.mask->sha) ||
	    !is_zero_ether_addr(mt.mask->tha)) {
		err = -EINVAL;
		goto out;
	}

	if (mt.mask->sip) {
		ipval = be32_to_cpu((__force __be32)mt.key->sip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	if (mt.mask->tip) {
		ipval = be32_to_cpu((__force __be32)mt.key->tip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);

	return 0;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
	return err;
}

/* Add an L3 TOS key from an ip dissector match (only the tos field is used) */
static int
sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_ip mt;
	int err = 0;

	flow_rule_match_ip(st->frule, &mt);

	if (mt.mask->tos) {
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
					    mt.key->tos,
					    mt.mask->tos);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
	return err;
}

/* Dispatch table indexed by flow dissector key id; unsupported keys are
 * left NULL and caught by the used_keys check in sparx5_tc_use_dissectors()
 */
static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = {
	[FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage,
	[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage,
	[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage,
	[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
	[FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage,
	[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
	[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
	[FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage,
	[FLOW_DISSECTOR_KEY_ARP] = sparx5_tc_flower_handler_arp_usage,
	[FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage,
};

/* Run every matching dissector key through its handler to populate vrule.
 * Fails with -ENOENT if the filter used a key no handler consumed.
 * On success *l3_proto holds the ethertype the filter matches on
 * (ETH_P_ALL when unconstrained).
 */
static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
				    struct vcap_admin *admin,
				    struct vcap_rule *vrule,
				    u16 *l3_proto)
{
	struct sparx5_tc_flower_parse_usage state = {
		.fco = fco,
		.vrule = vrule,
		.l3_proto = ETH_P_ALL,
		.admin = admin,
	};
	int idx, err = 0;

	state.frule = flow_cls_offload_flow_rule(fco);
	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
		if (!flow_rule_match_key(state.frule, idx))
			continue;
		if (!sparx5_tc_flower_usage_handlers[idx])
			continue;
		err = sparx5_tc_flower_usage_handlers[idx](&state);
		if (err)
			return err;
	}

	/* Any dissector key not marked used by a handler is unsupported */
	if (state.frule->match.dissector->used_keys ^ state.used_keys) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Unsupported match item");
		return -ENOENT;
	}

	if (l3_proto)
		*l3_proto = state.l3_proto;
	return err;
}

/* Validate the filter's action list before building a rule:
 * - at least one action, basic hw stats only
 * - no duplicated action types
 * - the last action must be a goto into a valid next VCAP lookup,
 *   except in the last chain
 * - trap and accept cannot be combined
 */
static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
					 struct net_device *ndev,
					 struct flow_cls_offload *fco)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
	struct flow_action_entry *actent, *last_actent = NULL;
	struct flow_action *act = &rule->action;
	u64 action_mask = 0;
	int idx;

	if (!flow_action_has_entries(act)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
		return -EINVAL;
	}

	if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
		return -EOPNOTSUPP;

	flow_action_for_each(idx, actent, act) {
		if (action_mask & BIT(actent->id)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "More actions of the same type");
			return -EINVAL;
		}
		action_mask |= BIT(actent->id);
		last_actent = actent; /* Save last action for later check */
	}

	/* Check if last action is a goto
	 * The last chain/lookup does not need to have a goto action
	 */
	if (last_actent->id == FLOW_ACTION_GOTO) {
		/* Check if the destination chain is in one of the VCAPs */
		if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
					 last_actent->chain_index)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Invalid goto chain");
			return -EINVAL;
		}
	} else if (!vcap_is_last_chain(vctrl, fco->common.chain_index)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Last action must be 'goto'");
		return -EINVAL;
	}

	/* Catch unsupported combinations of actions */
	if (action_mask & BIT(FLOW_ACTION_TRAP) &&
	    action_mask & BIT(FLOW_ACTION_ACCEPT)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine pass and trap action");
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Add a rule counter action */
static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
				      struct vcap_rule *vrule)
{
	int err;

	/* Only IS2 has a counter action; the rule id doubles as counter id */
	if (admin->vtype == VCAP_TYPE_IS2) {
		err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID,
					       vrule->id);
		if (err)
			return err;
		vcap_rule_set_counter_id(vrule, vrule->id);
	}

	return 0;
}

/* Collect all port keysets and apply the first of them, possibly wildcarded */
static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
					    struct vcap_rule *vrule,
					    struct vcap_admin *admin,
					    u16 l3_proto,
					    struct sparx5_multiple_rules *multi)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_keyset_list portkeysetlist = {};
	enum vcap_keyfield_set portkeysets[10] = {};
	struct vcap_keyset_list matches = {};
	enum vcap_keyfield_set keysets[10];
	int idx, jdx, err = 0, count = 0;
	struct sparx5_wildcard_rule *mru;
	const struct vcap_set *kinfo;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;

	/* Find the keysets that the rule can use */
	matches.keysets = keysets;
	matches.max = ARRAY_SIZE(keysets);
	if (vcap_rule_find_keysets(vrule, &matches) == 0)
		return -EINVAL;

	/* Find the keysets that the port configuration supports */
	portkeysetlist.max = ARRAY_SIZE(portkeysets);
	portkeysetlist.keysets = portkeysets;
	err = sparx5_vcap_get_port_keyset(ndev,
					  admin, vrule->vcap_chain_id,
					  l3_proto,
					  &portkeysetlist);
	if (err)
		return err;

	/* Find the intersection of the two sets of keyset */
	for (idx = 0; idx < portkeysetlist.cnt; ++idx) {
		kinfo = vcap_keyfieldset(vctrl, admin->vtype,
					 portkeysetlist.keysets[idx]);
		if (!kinfo)
			continue;

		/* Find a port keyset that matches the required keys
		 * If there are multiple keysets then compose a type id mask
		 */
		for (jdx = 0; jdx < matches.cnt; ++jdx) {
			if (portkeysetlist.keysets[idx] != matches.keysets[jdx])
				continue;

			/* Group candidate keysets by rule size (sw_per_item) */
			mru = &multi->rule[kinfo->sw_per_item];
			if (!mru->selected) {
				mru->selected = true;
				mru->keyset = portkeysetlist.keysets[idx];
				mru->value = kinfo->type_id;
			}
			mru->value &= kinfo->type_id;
			mru->mask |= kinfo->type_id;
			++count;
		}
	}
	if (count == 0)
		return -EPROTO;

	/* A wildcard (ETH_P_ALL) filter must cover every port keyset */
	if (l3_proto == ETH_P_ALL && count < portkeysetlist.cnt)
		return -ENOENT;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		/* Align the mask to the combined value */
		mru->mask ^= mru->value;
	}

	/* Set the chosen keyset on the rule and set a wildcarded type if there
	 * are more than one keyset
	 */
	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		vcap_set_rule_set_keyset(vrule, mru->keyset);
		if (count > 1)
			/* Some keysets do not have a type field */
			vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE,
					      mru->value,
					      ~mru->mask);
		mru->selected = false; /* mark as done */
		break; /* Stop here and add more rules later */
	}
	return err;
}

/* Duplicate an existing rule with an alternate keyset so a wildcard filter
 * can cover additional rule sizes; the copy shares the original's cookie
 */
static int sparx5_tc_add_rule_copy(struct vcap_control *vctrl,
				   struct flow_cls_offload *fco,
				   struct vcap_rule *erule,
				   struct vcap_admin *admin,
				   struct sparx5_wildcard_rule *rule)
{
	/* Keys that must not be carried over into the copied rule */
	enum vcap_key_field keylist[] = {
		VCAP_KF_IF_IGR_PORT_MASK,
		VCAP_KF_IF_IGR_PORT_MASK_SEL,
		VCAP_KF_IF_IGR_PORT_MASK_RNG,
		VCAP_KF_LOOKUP_FIRST_IS,
		VCAP_KF_TYPE,
	};
	struct vcap_rule *vrule;
	int err;

	/* Add an extra rule with a special user and the new keyset */
	erule->user = VCAP_USER_TC_EXTRA;
	vrule = vcap_copy_rule(erule);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	/* Link the new rule to the existing rule with the cookie */
	vrule->cookie = erule->cookie;
	vcap_filter_rule_keys(vrule, keylist, ARRAY_SIZE(keylist), true);
	err = vcap_set_rule_set_keyset(vrule, rule->keyset);
	if (err) {
		pr_err("%s:%d: could not set keyset %s in rule: %u\n",
		       __func__, __LINE__,
		       vcap_keyset_name(vctrl, rule->keyset),
		       vrule->id);
		goto out;
	}

	/* Some keysets do not have a type field, so ignore return value */
	vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE, rule->value, ~rule->mask);

	err = vcap_set_rule_set_actionset(vrule, erule->actionset);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	err = vcap_val_rule(vrule, ETH_P_ALL);
	if (err) {
		pr_err("%s:%d: could not validate rule: %u\n",
		       __func__, __LINE__, vrule->id);
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err) {
		pr_err("%s:%d: could not add rule: %u\n",
		       __func__, __LINE__, vrule->id);
		goto out;
	}
out:
	/* The working copy is always freed; vcap_add_rule stored its own
	 * representation on success
	 */
	vcap_free_rule(vrule);
	return err;
}

/* Add one rule copy per remaining selected keyset size (wildcard filters) */
static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
					 struct flow_cls_offload *fco,
					 struct vcap_rule *erule,
					 struct vcap_admin *admin,
					 struct sparx5_multiple_rules *multi)
{
	int idx, err = 0;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		if (!multi->rule[idx].selected)
			continue;

		err = sparx5_tc_add_rule_copy(vctrl, fco, erule, admin,
					      &multi->rule[idx]);
		if (err)
			break;
	}
	return err;
}

/* Add the actionset that is the default for the VCAP type */
static int sparx5_tc_set_actionset(struct vcap_admin *admin,
				   struct vcap_rule *vrule)
{
	enum vcap_actionfield_set aset;
	int err = 0;

	switch (admin->vtype) {
	case VCAP_TYPE_IS0:
		aset = VCAP_AFS_CLASSIFICATION;
		break;
	case VCAP_TYPE_IS2:
		aset = VCAP_AFS_BASE_TYPE;
		break;
	default:
		return -EINVAL;
	}
	/* Do not overwrite any current actionset */
	if (vrule->actionset == VCAP_AFS_NO_VALUE)
		err = vcap_set_rule_set_actionset(vrule, aset);
	return err;
}

/* FLOW_CLS_REPLACE: translate the flower filter into one VCAP rule (plus
 * extra keyset copies for wildcard protocol filters) and install it
 */
static int sparx5_tc_flower_replace(struct net_device *ndev,
				    struct flow_cls_offload *fco,
				    struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct sparx5_multiple_rules multi = {};
	struct flow_action_entry *act;
	struct vcap_control *vctrl;
	struct flow_rule *frule;
	struct vcap_rule *vrule;
	u16 l3_proto;
	int err, idx;

	vctrl = port->sparx5->vcap_ctrl;

	err = sparx5_tc_flower_action_check(vctrl, ndev, fco);
	if (err)
		return err;

	vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC,
				fco->common.prio, 0);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	vrule->cookie = fco->cookie;

	l3_proto = ETH_P_ALL;
	err = sparx5_tc_use_dissectors(fco, admin, vrule, &l3_proto);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	frule = flow_cls_offload_flow_rule(fco);
	flow_action_for_each(idx, act, &frule->action) {
		switch (act->id) {
		case FLOW_ACTION_TRAP:
			/* Redirect to CPU queue 0 and drop the frame from
			 * the normal forwarding path
			 */
			err = vcap_rule_add_action_bit(vrule,
						       VCAP_AF_CPU_COPY_ENA,
						       VCAP_BIT_1);
			if (err)
				goto out;
			err = vcap_rule_add_action_u32(vrule,
						       VCAP_AF_CPU_QUEUE_NUM, 0);
			if (err)
				goto out;
			err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
						       SPX5_PMM_REPLACE_ALL);
			if (err)
				goto out;
			/* For now the actionset is hardcoded */
			err = vcap_set_rule_set_actionset(vrule,
							  VCAP_AFS_BASE_TYPE);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_ACCEPT:
			err = sparx5_tc_set_actionset(admin, vrule);
			if (err)
				goto out;
			break;
		case FLOW_ACTION_GOTO:
			err = sparx5_tc_set_actionset(admin, vrule);
			if (err)
				goto out;
			/* Links between VCAPs will be added later */
			break;
		default:
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Unsupported TC action");
			err = -EOPNOTSUPP;
			goto out;
		}
	}

	err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin, l3_proto,
					       &multi);
	if (err) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "No matching port keyset for filter protocol and keys");
		goto out;
	}

	/* provide the l3 protocol to guide the keyset selection */
	err = vcap_val_rule(vrule, l3_proto);
	if (err) {
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err)
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Could not add the filter");

	if (l3_proto == ETH_P_ALL)
		err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,
						    &multi);

out:
	/* Always free the working rule; the installed rule is kept by
	 * vcap_add_rule
	 */
	vcap_free_rule(vrule);
	return err;
}

/* FLOW_CLS_DESTROY: delete every rule sharing the filter's cookie.
 * Returns -ENOENT when no rule matched the cookie.
 */
static int sparx5_tc_flower_destroy(struct net_device *ndev,
				    struct flow_cls_offload *fco,
				    struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_control *vctrl;
	int err = -ENOENT, rule_id;

	vctrl = port->sparx5->vcap_ctrl;
	while (true) {
		rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
		if (rule_id <= 0)
			break;
		err = vcap_del_rule(vctrl, ndev, rule_id);
		if (err) {
			pr_err("%s:%d: could not delete rule %d\n",
			       __func__, __LINE__, rule_id);
			break;
		}
	}
	return err;
}

/* FLOW_CLS_STATS: report the packet counter for the filter's cookie.
 * NOTE(review): byte count and lastused are reported as zero - only the
 * packet counter is read from hardware.
 */
static int sparx5_tc_flower_stats(struct net_device *ndev,
				  struct flow_cls_offload *fco,
				  struct vcap_admin *admin)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_counter ctr = {};
	struct vcap_control *vctrl;
	ulong lastused = 0;
	int err;

	vctrl = port->sparx5->vcap_ctrl;
	err = vcap_get_rule_count_by_cookie(vctrl, &ctr, fco->cookie);
	if (err)
		return err;
	flow_stats_update(&fco->stats, 0x0, ctr.value, 0, lastused,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	return err;
}

/* Entry point for TC flower offload on a Sparx5 port: resolve the VCAP
 * instance from the chain id and dispatch on the flower command
 */
int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
		     bool ingress)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_control *vctrl;
	struct vcap_admin *admin;
	int err = -EINVAL;

	/* Get vcap instance from the chain id */
	vctrl = port->sparx5->vcap_ctrl;
	admin = vcap_find_admin(vctrl, fco->common.chain_index);
	if (!admin) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain");
		return err;
	}

	switch (fco->command) {
	case FLOW_CLS_REPLACE:
		return sparx5_tc_flower_replace(ndev, fco, admin);
	case FLOW_CLS_DESTROY:
		return sparx5_tc_flower_destroy(ndev, fco, admin);
	case FLOW_CLS_STATS:
		return sparx5_tc_flower_stats(ndev, fco, admin);
	default:
		return -EOPNOTSUPP;
	}
}