// SPDX-License-Identifier: GPL-2.0+
/* Microchip VCAP API
 *
 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/tcp.h>

#include "sparx5_tc.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"

#define SPX5_MAX_RULE_SIZE 13 /* allows X1, X2, X4, X6 and X12 rules */

/* Collect keysets and type ids for multiple rules per size */
struct sparx5_wildcard_rule {
	bool selected;			/* this rule size has a candidate keyset */
	u8 value;			/* combined (ANDed) keyset type id */
	u8 mask;			/* ORed type ids; aligned to value later */
	enum vcap_keyfield_set keyset;	/* first matching keyset for this size */
};

/* One wildcard rule candidate per possible rule size (X1..X12) */
struct sparx5_multiple_rules {
	struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
};

/* State shared by the flower dissector-key handlers below */
struct sparx5_tc_flower_parse_usage {
	struct flow_cls_offload *fco;	/* classifier offload request (extack) */
	struct flow_rule *frule;	/* flower rule being parsed */
	struct vcap_rule *vrule;	/* VCAP rule being built */
	struct vcap_admin *admin;	/* target VCAP instance */
	u16 l3_proto;			/* EtherType from basic match, host order */
	u8 l4_proto;			/* IP protocol from basic match */
	unsigned int used_keys;		/* dissector keys consumed so far */
};

/* ARP/RARP opcode encoding used by the IS2 ARP keyset */
enum sparx5_is2_arp_opcode {
	SPX5_IS2_ARP_REQUEST,
	SPX5_IS2_ARP_REPLY,
	SPX5_IS2_RARP_REQUEST,
	SPX5_IS2_RARP_REPLY,
};

/* ARP opcode values as delivered by the TC flower ARP match */
enum tc_arp_opcode {
	TC_ARP_OP_RESERVED,
	TC_ARP_OP_REQUEST,
	TC_ARP_OP_REPLY,
};

/* Handle FLOW_DISSECTOR_KEY_ETH_ADDRS: add L2 SMAC/DMAC keys to the rule.
 * Only addresses with a non-zero mask are added.
 */
static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st)
{
	enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
	enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
	struct flow_match_eth_addrs match;
	struct vcap_u48_key smac, dmac;
	int err = 0;

	flow_rule_match_eth_addrs(st->frule, &match);

	if (!is_zero_ether_addr(match.mask->src)) {
		vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
		vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
		err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
		if (err)
			goto out;
	}

	if (!is_zero_ether_addr(match.mask->dst)) {
		vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
		vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
		err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
	return err;
}

/* Handle FLOW_DISSECTOR_KEY_IPV4_ADDRS: add SIP/DIP keys.
 * Only applied when the basic match already established ETH_P_IP; the
 * dissector key is still marked as used either way so the final
 * used_keys check passes.
 */
static int
sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage *st)
{
	int err = 0;

	if (st->l3_proto == ETH_P_IP) {
		struct flow_match_ipv4_addrs mt;

		flow_rule_match_ipv4_addrs(st->frule, &mt);
		if (mt.mask->src) {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP4_SIP,
						    be32_to_cpu(mt.key->src),
						    be32_to_cpu(mt.mask->src));
			if (err)
				goto out;
		}
		if (mt.mask->dst) {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP4_DIP,
						    be32_to_cpu(mt.key->dst),
						    be32_to_cpu(mt.mask->dst));
			if (err)
				goto out;
		}
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
	return err;
}

/* Handle FLOW_DISSECTOR_KEY_IPV6_ADDRS: add 128-bit SIP/DIP keys.
 * Only applied when the basic match established ETH_P_IPV6.
 */
static int
sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st)
{
	int err = 0;

	if (st->l3_proto == ETH_P_IPV6) {
		struct flow_match_ipv6_addrs mt;
		struct vcap_u128_key sip;
		struct vcap_u128_key dip;

		flow_rule_match_ipv6_addrs(st->frule, &mt);
		/* Check if address masks are non-zero */
		if (!ipv6_addr_any(&mt.mask->src)) {
			vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
			vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
			err = vcap_rule_add_key_u128(st->vrule,
						     VCAP_KF_L3_IP6_SIP, &sip);
			if (err)
				goto out;
		}
		if (!ipv6_addr_any(&mt.mask->dst)) {
			vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
			vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
			err = vcap_rule_add_key_u128(st->vrule,
						     VCAP_KF_L3_IP6_DIP, &dip);
			if (err)
				goto out;
		}
	}
	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
	return err;
out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
	return err;
}

/* Handle FLOW_DISSECTOR_KEY_CONTROL: map the TC fragment flags onto the
 * 2-bit VCAP L3_FRAGMENT_TYPE key:
 *   0 = not fragmented, 1 = initial fragment, 3 = follow-up fragment.
 */
static int
sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_control mt;
	u32 value, mask;
	int err = 0;

	flow_rule_match_control(st->frule, &mt);

	if (mt.mask->flags) {
		if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
			if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
				value = 1; /* initial fragment */
				mask = 0x3;
			} else {
				if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
					value = 3; /* follow up fragment */
					mask = 0x3;
				} else {
					value = 0; /* no fragment */
					mask = 0x3;
				}
			}
		} else {
			if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
				value = 3; /* follow up fragment */
				mask = 0x3;
			} else {
				value = 0; /* no fragment */
				mask = 0x3;
			}
		}

		err = vcap_rule_add_key_u32(st->vrule,
					    VCAP_KF_L3_FRAGMENT_TYPE,
					    value, mask);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
	return err;
}

/* Handle FLOW_DISSECTOR_KEY_PORTS: add L4 source/destination port keys */
static int
sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_ports mt;
	u16 value, mask;
	int err = 0;

	flow_rule_match_ports(st->frule, &mt);

	if (mt.mask->src) {
		value = be16_to_cpu(mt.key->src);
		mask = be16_to_cpu(mt.mask->src);
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
					    mask);
		if (err)
			goto out;
	}

	if (mt.mask->dst) {
		value = be16_to_cpu(mt.key->dst);
		mask = be16_to_cpu(mt.mask->dst);
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
					    mask);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
	return err;
}
/* Handle FLOW_DISSECTOR_KEY_BASIC: record the L3/L4 protocol and add the
 * corresponding protocol classification keys.  Known EtherTypes use the
 * dedicated IP4_IS/TCP_IS bits; anything else matches on the raw ETYPE.
 */
static int
sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_basic mt;
	int err = 0;

	flow_rule_match_basic(st->frule, &mt);

	if (mt.mask->n_proto) {
		st->l3_proto = be16_to_cpu(mt.key->n_proto);
		if (!sparx5_vcap_is_known_etype(st->admin, st->l3_proto)) {
			err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
						    st->l3_proto, ~0);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IP) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l3_proto == ETH_P_IPV6) {
			err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
			/* IS0 additionally needs the SNAP indication for IPv6 */
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				err = vcap_rule_add_key_bit(st->vrule,
							    VCAP_KF_IP_SNAP_IS,
							    VCAP_BIT_1);
				if (err)
					goto out;
			}

		}
	}

	if (mt.mask->ip_proto) {
		st->l4_proto = mt.key->ip_proto;
		if (st->l4_proto == IPPROTO_TCP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_1);
			if (err)
				goto out;
		} else if (st->l4_proto == IPPROTO_UDP) {
			err = vcap_rule_add_key_bit(st->vrule,
						    VCAP_KF_TCP_IS,
						    VCAP_BIT_0);
			if (err)
				goto out;
			/* IS0 distinguishes UDP via the TCP_UDP_IS bit */
			if (st->admin->vtype == VCAP_TYPE_IS0) {
				err = vcap_rule_add_key_bit(st->vrule,
							    VCAP_KF_TCP_UDP_IS,
							    VCAP_BIT_1);
				if (err)
					goto out;
			}
		} else {
			/* Any other L4 protocol matches on the raw value */
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP_PROTO,
						    st->l4_proto, ~0);
			if (err)
				goto out;
		}
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
	return err;
}

/* Handle FLOW_DISSECTOR_KEY_CVLAN: inner VLAN tag match, IS0 only.
 * An inner 802.1Q TPID selects the VID1/PCP1 key pair instead of VID0/PCP0.
 */
static int
sparx5_tc_flower_handler_cvlan_usage(struct sparx5_tc_flower_parse_usage *st)
{
	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID0;
	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP0;
	struct flow_match_vlan mt;
	u16 tpid;
	int err;

	if (st->admin->vtype != VCAP_TYPE_IS0) {
		NL_SET_ERR_MSG_MOD(st->fco->common.extack,
				   "cvlan not supported in this VCAP");
		return -EINVAL;
	}

	flow_rule_match_cvlan(st->frule, &mt);

	tpid = be16_to_cpu(mt.key->vlan_tpid);

	if (tpid == ETH_P_8021Q) {
		vid_key = VCAP_KF_8021Q_VID1;
		pcp_key = VCAP_KF_8021Q_PCP1;
	}

	if (mt.mask->vlan_id) {
		err = vcap_rule_add_key_u32(st->vrule, vid_key,
					    mt.key->vlan_id,
					    mt.mask->vlan_id);
		if (err)
			goto out;
	}

	if (mt.mask->vlan_priority) {
		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
					    mt.key->vlan_priority,
					    mt.mask->vlan_priority);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);

	return 0;
out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "cvlan parse error");
	return err;
}

/* Handle FLOW_DISSECTOR_KEY_VLAN: outer VLAN tag match.
 * IS0 matches on the outermost tag (VID0/PCP0); other VCAPs match on the
 * classified VID/PCP.
 */
static int
sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st)
{
	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
	struct flow_match_vlan mt;
	int err;

	flow_rule_match_vlan(st->frule, &mt);

	if (st->admin->vtype == VCAP_TYPE_IS0) {
		vid_key = VCAP_KF_8021Q_VID0;
		pcp_key = VCAP_KF_8021Q_PCP0;
	}

	if (mt.mask->vlan_id) {
		err = vcap_rule_add_key_u32(st->vrule, vid_key,
					    mt.key->vlan_id,
					    mt.mask->vlan_id);
		if (err)
			goto out;
	}

	if (mt.mask->vlan_priority) {
		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
					    mt.key->vlan_priority,
					    mt.mask->vlan_priority);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);

	return 0;
out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
	return err;
}

/* Handle FLOW_DISSECTOR_KEY_TCP: translate each masked TCP header flag
 * (FIN/SYN/RST/PSH/ACK/URG) into the matching VCAP L4 flag bit.
 */
static int
sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_tcp mt;
	u16 tcp_flags_mask;
	u16 tcp_flags_key;
	enum vcap_bit val;
	int err = 0;

	flow_rule_match_tcp(st->frule, &mt);
	tcp_flags_key = be16_to_cpu(mt.key->flags);
	tcp_flags_mask = be16_to_cpu(mt.mask->flags);

	if (tcp_flags_mask & TCPHDR_FIN) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_FIN)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_SYN) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_SYN)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_RST) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_RST)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_PSH) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_PSH)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_ACK) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_ACK)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_URG) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_URG)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
	return err;
}
/* Handle FLOW_DISSECTOR_KEY_ARP: map the TC ARP match onto the IS2 ARP
 * keyset.  The opcode is translated to the IS2 encoding (ARP vs RARP is
 * decided by the EtherType from the basic match); hardware addresses are
 * rejected because the IS2 ARP keyset does not support them.
 */
static int
sparx5_tc_flower_handler_arp_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_arp mt;
	u16 value, mask;
	u32 ipval, ipmsk;
	int err;

	flow_rule_match_arp(st->frule, &mt);

	if (mt.mask->op) {
		mask = 0x3;
		if (st->l3_proto == ETH_P_ARP) {
			value = mt.key->op == TC_ARP_OP_REQUEST ?
					SPX5_IS2_ARP_REQUEST :
					SPX5_IS2_ARP_REPLY;
		} else { /* RARP */
			value = mt.key->op == TC_ARP_OP_REQUEST ?
					SPX5_IS2_RARP_REQUEST :
					SPX5_IS2_RARP_REPLY;
		}
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
					    value, mask);
		if (err)
			goto out;
	}

	/* The IS2 ARP keyset does not support ARP hardware addresses */
	if (!is_zero_ether_addr(mt.mask->sha) ||
	    !is_zero_ether_addr(mt.mask->tha)) {
		err = -EINVAL;
		goto out;
	}

	if (mt.mask->sip) {
		ipval = be32_to_cpu((__force __be32)mt.key->sip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	if (mt.mask->tip) {
		ipval = be32_to_cpu((__force __be32)mt.key->tip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);

	return 0;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
	return err;
}

/* Handle FLOW_DISSECTOR_KEY_IP: add the L3 TOS key when masked */
static int
sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st)
{
	struct flow_match_ip mt;
	int err = 0;

	flow_rule_match_ip(st->frule, &mt);

	if (mt.mask->tos) {
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
					    mt.key->tos,
					    mt.mask->tos);
		if (err)
			goto out;
	}

	st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
	return err;
}

/* Dispatch table: one handler per supported flower dissector key.
 * Unsupported keys are left NULL and rejected via the used_keys check in
 * sparx5_tc_use_dissectors().
 */
static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = {
	[FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage,
	[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage,
	[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage,
	[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
	[FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage,
	[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
	[FLOW_DISSECTOR_KEY_CVLAN] = sparx5_tc_flower_handler_cvlan_usage,
	[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
	[FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage,
	[FLOW_DISSECTOR_KEY_ARP] = sparx5_tc_flower_handler_arp_usage,
	[FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage,
};
/* Run every matching dissector-key handler on the flower rule, building up
 * the VCAP keys in @vrule.  Fails with -ENOENT if the rule uses a dissector
 * key that no handler consumed.  On success the detected L3 protocol is
 * returned through @l3_proto (ETH_P_ALL when no EtherType was matched).
 */
static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
				    struct vcap_admin *admin,
				    struct vcap_rule *vrule,
				    u16 *l3_proto)
{
	struct sparx5_tc_flower_parse_usage state = {
		.fco = fco,
		.vrule = vrule,
		.l3_proto = ETH_P_ALL,
		.admin = admin,
	};
	int idx, err = 0;

	state.frule = flow_cls_offload_flow_rule(fco);
	for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
		if (!flow_rule_match_key(state.frule, idx))
			continue;
		if (!sparx5_tc_flower_usage_handlers[idx])
			continue;
		err = sparx5_tc_flower_usage_handlers[idx](&state);
		if (err)
			return err;
	}

	/* Any dissector key not consumed by a handler is unsupported */
	if (state.frule->match.dissector->used_keys ^ state.used_keys) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Unsupported match item");
		return -ENOENT;
	}

	if (l3_proto)
		*l3_proto = state.l3_proto;
	return err;
}

/* Validate the TC action list before any rule is built: at least one action,
 * basic hw stats only, no duplicated action types, a trailing 'goto' unless
 * this is the last chain/lookup, and no pass+trap combination.
 */
static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
					 struct net_device *ndev,
					 struct flow_cls_offload *fco)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
	struct flow_action_entry *actent, *last_actent = NULL;
	struct flow_action *act = &rule->action;
	u64 action_mask = 0;
	int idx;

	if (!flow_action_has_entries(act)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack, "No actions");
		return -EINVAL;
	}

	if (!flow_action_basic_hw_stats_check(act, fco->common.extack))
		return -EOPNOTSUPP;

	flow_action_for_each(idx, actent, act) {
		if (action_mask & BIT(actent->id)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "More actions of the same type");
			return -EINVAL;
		}
		action_mask |= BIT(actent->id);
		last_actent = actent; /* Save last action for later check */
	}

	/* Check if last action is a goto
	 * The last chain/lookup does not need to have a goto action
	 */
	if (last_actent->id == FLOW_ACTION_GOTO) {
		/* Check if the destination chain is in one of the VCAPs */
		if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
					 last_actent->chain_index)) {
			NL_SET_ERR_MSG_MOD(fco->common.extack,
					   "Invalid goto chain");
			return -EINVAL;
		}
	} else if (!vcap_is_last_chain(vctrl, fco->common.chain_index)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Last action must be 'goto'");
		return -EINVAL;
	}

	/* Catch unsupported combinations of actions */
	if (action_mask & BIT(FLOW_ACTION_TRAP) &&
	    action_mask & BIT(FLOW_ACTION_ACCEPT)) {
		NL_SET_ERR_MSG_MOD(fco->common.extack,
				   "Cannot combine pass and trap action");
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Add a rule counter action
 * IS2 rules use the rule id as counter id; other VCAP types have no
 * counter action here.
 */
static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
				      struct vcap_rule *vrule)
{
	int err;

	if (admin->vtype == VCAP_TYPE_IS2) {
		err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID,
					       vrule->id);
		if (err)
			return err;
		vcap_rule_set_counter_id(vrule, vrule->id);
	}

	return 0;
}

/* Collect all port keysets and apply the first of them, possibly wildcarded */
static int sparx5_tc_select_protocol_keyset(struct net_device *ndev,
					    struct vcap_rule *vrule,
					    struct vcap_admin *admin,
					    u16 l3_proto,
					    struct sparx5_multiple_rules *multi)
{
	struct sparx5_port *port = netdev_priv(ndev);
	struct vcap_keyset_list portkeysetlist = {};
	enum vcap_keyfield_set portkeysets[10] = {};
	struct vcap_keyset_list matches = {};
	enum vcap_keyfield_set keysets[10];
	int idx, jdx, err = 0, count = 0;
	struct sparx5_wildcard_rule *mru;
	const struct vcap_set *kinfo;
	struct vcap_control *vctrl;

	vctrl = port->sparx5->vcap_ctrl;

	/* Find the keysets that the rule can use */
	matches.keysets = keysets;
	matches.max = ARRAY_SIZE(keysets);
	if (vcap_rule_find_keysets(vrule, &matches) == 0)
		return -EINVAL;

	/* Find the keysets that the port configuration supports */
	portkeysetlist.max = ARRAY_SIZE(portkeysets);
	portkeysetlist.keysets = portkeysets;
	err = sparx5_vcap_get_port_keyset(ndev,
					  admin, vrule->vcap_chain_id,
					  l3_proto,
					  &portkeysetlist);
	if (err)
		return err;

	/* Find the intersection of the two sets of keyset */
	for (idx = 0; idx < portkeysetlist.cnt; ++idx) {
		kinfo = vcap_keyfieldset(vctrl, admin->vtype,
					 portkeysetlist.keysets[idx]);
		if (!kinfo)
			continue;

		/* Find a port keyset that matches the required keys
		 * If there are multiple keysets then compose a type id mask
		 */
		for (jdx = 0; jdx < matches.cnt; ++jdx) {
			if (portkeysetlist.keysets[idx] != matches.keysets[jdx])
				continue;

			mru = &multi->rule[kinfo->sw_per_item];
			if (!mru->selected) {
				mru->selected = true;
				mru->keyset = portkeysetlist.keysets[idx];
				mru->value = kinfo->type_id;
			}
			mru->value &= kinfo->type_id;
			mru->mask |= kinfo->type_id;
			++count;
		}
	}
	if (count == 0)
		return -EPROTO;

	/* An ETH_P_ALL rule must cover every port keyset of this size */
	if (l3_proto == ETH_P_ALL && count < portkeysetlist.cnt)
		return -ENOENT;

	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		/* Align the mask to the combined value */
		mru->mask ^= mru->value;
	}

	/* Set the chosen keyset on the rule and set a wildcarded type if there
	 * are more than one keyset
	 */
	for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) {
		mru = &multi->rule[idx];
		if (!mru->selected)
			continue;

		vcap_set_rule_set_keyset(vrule, mru->keyset);
		if (count > 1)
			/* Some keysets do not have a type field */
			vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE,
					      mru->value,
					      ~mru->mask);
		mru->selected = false; /* mark as done */
		break; /* Stop here and add more rules later */
	}
	return err;
}

/* Duplicate @erule under a new keyset for one of the remaining wildcard
 * candidates, sharing its cookie so the copies are deleted together.
 */
static int sparx5_tc_add_rule_copy(struct vcap_control *vctrl,
				   struct flow_cls_offload *fco,
				   struct vcap_rule *erule,
				   struct vcap_admin *admin,
				   struct sparx5_wildcard_rule *rule)
{
	/* Keys to keep in the copy; all others are stripped so the new
	 * keyset can be applied cleanly
	 */
	enum vcap_key_field keylist[] = {
		VCAP_KF_IF_IGR_PORT_MASK,
		VCAP_KF_IF_IGR_PORT_MASK_SEL,
		VCAP_KF_IF_IGR_PORT_MASK_RNG,
		VCAP_KF_LOOKUP_FIRST_IS,
		VCAP_KF_TYPE,
	};
	struct vcap_rule *vrule;
	int err;

	/* Add an extra rule with a special user and the new keyset */
	erule->user = VCAP_USER_TC_EXTRA;
	vrule = vcap_copy_rule(erule);
	if (IS_ERR(vrule))
		return PTR_ERR(vrule);

	/* Link the new rule to the existing rule with the cookie */
	vrule->cookie = erule->cookie;
	vcap_filter_rule_keys(vrule, keylist, ARRAY_SIZE(keylist), true);
	err = vcap_set_rule_set_keyset(vrule, rule->keyset);
	if (err) {
		pr_err("%s:%d: could not set keyset %s in rule: %u\n",
		       __func__, __LINE__,
		       vcap_keyset_name(vctrl, rule->keyset),
		       vrule->id);
		goto out;
	}

	/* Some keysets do not have a type field, so ignore return value */
	vcap_rule_mod_key_u32(vrule, VCAP_KF_TYPE, rule->value, ~rule->mask);

	err = vcap_set_rule_set_actionset(vrule, erule->actionset);
	if (err)
		goto out;

	err = sparx5_tc_add_rule_counter(admin, vrule);
	if (err)
		goto out;

	err = vcap_val_rule(vrule, ETH_P_ALL);
	if (err) {
		pr_err("%s:%d: could not validate rule: %u\n",
		       __func__, __LINE__, vrule->id);
		vcap_set_tc_exterr(fco, vrule);
		goto out;
	}
	err = vcap_add_rule(vrule);
	if (err) {
		pr_err("%s:%d: could not add rule: %u\n",
		       __func__, __LINE__, vrule->id);
		goto out;
	}
out:
	vcap_free_rule(vrule);
	return err;
}
828 err = sparx5_tc_add_rule_counter(admin, vrule); 829 if (err) 830 goto out; 831 832 err = vcap_val_rule(vrule, ETH_P_ALL); 833 if (err) { 834 pr_err("%s:%d: could not validate rule: %u\n", 835 __func__, __LINE__, vrule->id); 836 vcap_set_tc_exterr(fco, vrule); 837 goto out; 838 } 839 err = vcap_add_rule(vrule); 840 if (err) { 841 pr_err("%s:%d: could not add rule: %u\n", 842 __func__, __LINE__, vrule->id); 843 goto out; 844 } 845 out: 846 vcap_free_rule(vrule); 847 return err; 848 } 849 850 static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl, 851 struct flow_cls_offload *fco, 852 struct vcap_rule *erule, 853 struct vcap_admin *admin, 854 struct sparx5_multiple_rules *multi) 855 { 856 int idx, err = 0; 857 858 for (idx = 0; idx < SPX5_MAX_RULE_SIZE; ++idx) { 859 if (!multi->rule[idx].selected) 860 continue; 861 862 err = sparx5_tc_add_rule_copy(vctrl, fco, erule, admin, 863 &multi->rule[idx]); 864 if (err) 865 break; 866 } 867 return err; 868 } 869 870 /* Add the actionset that is the default for the VCAP type */ 871 static int sparx5_tc_set_actionset(struct vcap_admin *admin, 872 struct vcap_rule *vrule) 873 { 874 enum vcap_actionfield_set aset; 875 int err = 0; 876 877 switch (admin->vtype) { 878 case VCAP_TYPE_IS0: 879 aset = VCAP_AFS_CLASSIFICATION; 880 break; 881 case VCAP_TYPE_IS2: 882 aset = VCAP_AFS_BASE_TYPE; 883 break; 884 default: 885 return -EINVAL; 886 } 887 /* Do not overwrite any current actionset */ 888 if (vrule->actionset == VCAP_AFS_NO_VALUE) 889 err = vcap_set_rule_set_actionset(vrule, aset); 890 return err; 891 } 892 893 /* Add the VCAP key to match on for a rule target value */ 894 static int sparx5_tc_add_rule_link_target(struct vcap_admin *admin, 895 struct vcap_rule *vrule, 896 int target_cid) 897 { 898 int link_val = target_cid % VCAP_CID_LOOKUP_SIZE; 899 int err; 900 901 if (!link_val) 902 return 0; 903 904 switch (admin->vtype) { 905 case VCAP_TYPE_IS0: 906 /* Add NXT_IDX key for chaining rules between IS0 instances */ 
907 err = vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX_SEL, 908 1, /* enable */ 909 ~0); 910 if (err) 911 return err; 912 return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX, 913 link_val, /* target */ 914 ~0); 915 case VCAP_TYPE_IS2: 916 /* Add PAG key for chaining rules from IS0 */ 917 return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_PAG, 918 link_val, /* target */ 919 ~0); 920 default: 921 break; 922 } 923 return 0; 924 } 925 926 /* Add the VCAP action that adds a target value to a rule */ 927 static int sparx5_tc_add_rule_link(struct vcap_control *vctrl, 928 struct vcap_admin *admin, 929 struct vcap_rule *vrule, 930 int from_cid, int to_cid) 931 { 932 struct vcap_admin *to_admin = vcap_find_admin(vctrl, to_cid); 933 int diff, err = 0; 934 935 diff = vcap_chain_offset(vctrl, from_cid, to_cid); 936 if (!(to_admin && diff > 0)) { 937 pr_err("%s:%d: unsupported chain direction: %d\n", 938 __func__, __LINE__, to_cid); 939 return -EINVAL; 940 } 941 if (admin->vtype == VCAP_TYPE_IS0 && 942 to_admin->vtype == VCAP_TYPE_IS0) { 943 /* Between IS0 instances the G_IDX value is used */ 944 err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX, diff); 945 if (err) 946 goto out; 947 err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX_CTRL, 948 1); /* Replace */ 949 if (err) 950 goto out; 951 } else if (admin->vtype == VCAP_TYPE_IS0 && 952 to_admin->vtype == VCAP_TYPE_IS2) { 953 /* Between IS0 and IS2 the PAG value is used */ 954 err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_VAL, diff); 955 if (err) 956 goto out; 957 err = vcap_rule_add_action_u32(vrule, 958 VCAP_AF_PAG_OVERRIDE_MASK, 959 0xff); 960 if (err) 961 goto out; 962 } else { 963 pr_err("%s:%d: unsupported chain destination: %d\n", 964 __func__, __LINE__, to_cid); 965 err = -EOPNOTSUPP; 966 } 967 out: 968 return err; 969 } 970 971 static int sparx5_tc_flower_replace(struct net_device *ndev, 972 struct flow_cls_offload *fco, 973 struct vcap_admin *admin) 974 { 975 struct sparx5_port *port = 
netdev_priv(ndev); 976 struct sparx5_multiple_rules multi = {}; 977 struct flow_action_entry *act; 978 struct vcap_control *vctrl; 979 struct flow_rule *frule; 980 struct vcap_rule *vrule; 981 u16 l3_proto; 982 int err, idx; 983 984 vctrl = port->sparx5->vcap_ctrl; 985 986 err = sparx5_tc_flower_action_check(vctrl, ndev, fco); 987 if (err) 988 return err; 989 990 vrule = vcap_alloc_rule(vctrl, ndev, fco->common.chain_index, VCAP_USER_TC, 991 fco->common.prio, 0); 992 if (IS_ERR(vrule)) 993 return PTR_ERR(vrule); 994 995 vrule->cookie = fco->cookie; 996 997 l3_proto = ETH_P_ALL; 998 err = sparx5_tc_use_dissectors(fco, admin, vrule, &l3_proto); 999 if (err) 1000 goto out; 1001 1002 err = sparx5_tc_add_rule_counter(admin, vrule); 1003 if (err) 1004 goto out; 1005 1006 err = sparx5_tc_add_rule_link_target(admin, vrule, 1007 fco->common.chain_index); 1008 if (err) 1009 goto out; 1010 1011 frule = flow_cls_offload_flow_rule(fco); 1012 flow_action_for_each(idx, act, &frule->action) { 1013 switch (act->id) { 1014 case FLOW_ACTION_TRAP: 1015 if (admin->vtype != VCAP_TYPE_IS2) { 1016 NL_SET_ERR_MSG_MOD(fco->common.extack, 1017 "Trap action not supported in this VCAP"); 1018 err = -EOPNOTSUPP; 1019 goto out; 1020 } 1021 err = vcap_rule_add_action_bit(vrule, 1022 VCAP_AF_CPU_COPY_ENA, 1023 VCAP_BIT_1); 1024 if (err) 1025 goto out; 1026 err = vcap_rule_add_action_u32(vrule, 1027 VCAP_AF_CPU_QUEUE_NUM, 0); 1028 if (err) 1029 goto out; 1030 err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE, 1031 SPX5_PMM_REPLACE_ALL); 1032 if (err) 1033 goto out; 1034 break; 1035 case FLOW_ACTION_ACCEPT: 1036 err = sparx5_tc_set_actionset(admin, vrule); 1037 if (err) 1038 goto out; 1039 break; 1040 case FLOW_ACTION_GOTO: 1041 err = sparx5_tc_set_actionset(admin, vrule); 1042 if (err) 1043 goto out; 1044 sparx5_tc_add_rule_link(vctrl, admin, vrule, 1045 fco->common.chain_index, 1046 act->chain_index); 1047 break; 1048 default: 1049 NL_SET_ERR_MSG_MOD(fco->common.extack, 1050 "Unsupported TC 
action"); 1051 err = -EOPNOTSUPP; 1052 goto out; 1053 } 1054 } 1055 1056 err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin, l3_proto, 1057 &multi); 1058 if (err) { 1059 NL_SET_ERR_MSG_MOD(fco->common.extack, 1060 "No matching port keyset for filter protocol and keys"); 1061 goto out; 1062 } 1063 1064 /* provide the l3 protocol to guide the keyset selection */ 1065 err = vcap_val_rule(vrule, l3_proto); 1066 if (err) { 1067 vcap_set_tc_exterr(fco, vrule); 1068 goto out; 1069 } 1070 err = vcap_add_rule(vrule); 1071 if (err) 1072 NL_SET_ERR_MSG_MOD(fco->common.extack, 1073 "Could not add the filter"); 1074 1075 if (l3_proto == ETH_P_ALL) 1076 err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin, 1077 &multi); 1078 1079 out: 1080 vcap_free_rule(vrule); 1081 return err; 1082 } 1083 1084 static int sparx5_tc_flower_destroy(struct net_device *ndev, 1085 struct flow_cls_offload *fco, 1086 struct vcap_admin *admin) 1087 { 1088 struct sparx5_port *port = netdev_priv(ndev); 1089 struct vcap_control *vctrl; 1090 int err = -ENOENT, rule_id; 1091 1092 vctrl = port->sparx5->vcap_ctrl; 1093 while (true) { 1094 rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie); 1095 if (rule_id <= 0) 1096 break; 1097 err = vcap_del_rule(vctrl, ndev, rule_id); 1098 if (err) { 1099 pr_err("%s:%d: could not delete rule %d\n", 1100 __func__, __LINE__, rule_id); 1101 break; 1102 } 1103 } 1104 return err; 1105 } 1106 1107 static int sparx5_tc_flower_stats(struct net_device *ndev, 1108 struct flow_cls_offload *fco, 1109 struct vcap_admin *admin) 1110 { 1111 struct sparx5_port *port = netdev_priv(ndev); 1112 struct vcap_counter ctr = {}; 1113 struct vcap_control *vctrl; 1114 ulong lastused = 0; 1115 int err; 1116 1117 vctrl = port->sparx5->vcap_ctrl; 1118 err = vcap_get_rule_count_by_cookie(vctrl, &ctr, fco->cookie); 1119 if (err) 1120 return err; 1121 flow_stats_update(&fco->stats, 0x0, ctr.value, 0, lastused, 1122 FLOW_ACTION_HW_STATS_IMMEDIATE); 1123 return err; 1124 } 1125 1126 
int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco, 1127 bool ingress) 1128 { 1129 struct sparx5_port *port = netdev_priv(ndev); 1130 struct vcap_control *vctrl; 1131 struct vcap_admin *admin; 1132 int err = -EINVAL; 1133 1134 /* Get vcap instance from the chain id */ 1135 vctrl = port->sparx5->vcap_ctrl; 1136 admin = vcap_find_admin(vctrl, fco->common.chain_index); 1137 if (!admin) { 1138 NL_SET_ERR_MSG_MOD(fco->common.extack, "Invalid chain"); 1139 return err; 1140 } 1141 1142 switch (fco->command) { 1143 case FLOW_CLS_REPLACE: 1144 return sparx5_tc_flower_replace(ndev, fco, admin); 1145 case FLOW_CLS_DESTROY: 1146 return sparx5_tc_flower_destroy(ndev, fco, admin); 1147 case FLOW_CLS_STATS: 1148 return sparx5_tc_flower_stats(ndev, fco, admin); 1149 default: 1150 return -EOPNOTSUPP; 1151 } 1152 } 1153