// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "cn10k.h"
#include "otx2_common.h"
#include "qos.h"

#define CN10K_MAX_BURST_MANTISSA	0x7FFFULL
#define CN10K_MAX_BURST_SIZE		8453888ULL

#define CN10K_TLX_BURST_MANTISSA	GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT	GENMASK_ULL(47, 44)

struct otx2_tc_flow_stats {
	u64 bytes;
	u64 pkts;
	u64 used;
};

struct otx2_tc_flow {
	struct list_head		list;
	unsigned long			cookie;
	struct rcu_head			rcu;
	struct otx2_tc_flow_stats	stats;
	spinlock_t			lock; /* lock for stats */
	u16				rq;
	u16				entry;
	u16				leaf_profile;
	bool				is_act_police;
	u32				prio;
	struct npc_install_flow_req	req;
	u64				rate;
	u32				burst;
	bool				is_pps;
};

static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
				      u32 *burst_exp, u32 *burst_mantissa)
{
	int max_burst, max_mantissa;
	unsigned int tmp;

	if (is_dev_otx2(nic->pdev)) {
		max_burst = MAX_BURST_SIZE;
		max_mantissa = MAX_BURST_MANTISSA;
	} else {
		max_burst = CN10K_MAX_BURST_SIZE;
		max_mantissa = CN10K_MAX_BURST_MANTISSA;
	}

	/* Burst is calculated as
	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
	 * Max supported burst size is 130,816 bytes on OcteonTx2
	 * and 8,453,888 bytes on CN10K.
	 */
	burst = min_t(u32, burst, max_burst);
	if (burst) {
		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
		tmp = burst - rounddown_pow_of_two(burst);
		if (burst < max_mantissa)
			*burst_mantissa = tmp * 2;
		else
			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
	} else {
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = max_mantissa;
	}
}
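
/* Worked example for otx2_get_egress_burst_cfg(): on OcteonTx2 a requested
 * burst of 98304 bytes gives
 *   burst_exp      = ilog2(98304) - 1 = 15
 *   burst_mantissa = (98304 - 65536) / (1 << (15 - 7)) = 128
 * and the hardware expands that back to
 *   ((256 + 128) << (1 + 15)) / 256 = 98304 bytes.
 */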

static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	u64 tmp;

	/* Rate calculation by hardware
	 *
	 * PIR_ADD = ((256 + mantissa) << exp) / 256
	 * rate = (2 * PIR_ADD) / (1 << div_exp)
	 * The resultant rate is in Mbps.
	 */

	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
	 * Setting this to '0' will ease the calculation of
	 * exponent and mantissa.
	 */
	*div_exp = 0;

	if (maxrate) {
		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
		tmp = maxrate - rounddown_pow_of_two(maxrate);
		if (maxrate < MAX_RATE_MANTISSA)
			*mantissa = tmp * 2;
		else
			*mantissa = tmp / (1ULL << (*exp - 7));
	} else {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
	}
}
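
/* Worked example for otx2_get_egress_rate_cfg(), for a rate that takes the
 * else branch (maxrate >= MAX_RATE_MANTISSA), e.g. maxrate = 3000 Mbps:
 *   exp      = ilog2(3000) - 1 = 10
 *   mantissa = (3000 - 2048) / (1 << 3) = 119
 * and the hardware computes 2 * ((256 + 119) << 10) / 256 = 3000 Mbps.
 */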

u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
				u64 maxrate, u32 burst)
{
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	u64 regval = 0;

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

	if (is_dev_otx2(nic->pdev)) {
		regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	} else {
		regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	}

	return regval;
}

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
					 u32 burst, u64 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}

static int otx2_tc_validate_flow(struct otx2_nic *nic,
				 struct flow_action *actions,
				 struct netlink_ext_ack *extack)
{
	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload supports only 1 policing action");
		return -EINVAL;
	}
	return 0;
}

static int otx2_policer_validate(const struct flow_action *action,
				 const struct flow_action_entry *act,
				 struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}
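
/* Egress MATCHALL policing is typically configured with something like:
 *   tc qdisc add dev <iface> clsact
 *   tc filter add dev <iface> egress matchall skip_sw \
 *           action police rate 1gbit burst 64k
 * Only one egress MATCHALL rate limiter per interface is offloaded.
 */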
static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one Egress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		err = otx2_policer_validate(&cls->rule->action, entry, extack);
		if (err)
			return err;

		if (entry->police.rate_pkt_ps) {
			NL_SET_ERR_MSG_MOD(extack, "QoS offload doesn't support packets per second");
			return -EOPNOTSUPP;
		}
		err = otx2_set_matchall_egress_rate(nic, entry->police.burst,
						    otx2_convert_rate(entry->police.rate_bytes_ps));
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action is supported with Egress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = otx2_set_matchall_egress_rate(nic, 0, 0);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
	return err;
}

static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
				     struct otx2_tc_flow *node)
{
	int rc;

	mutex_lock(&nic->mbox.lock);

	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
	if (rc) {
		mutex_unlock(&nic->mbox.lock);
		return rc;
	}

	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
				     node->burst, node->rate, node->is_pps);
	if (rc)
		goto free_leaf;

	rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
	if (rc)
		goto free_leaf;

	mutex_unlock(&nic->mbox.lock);

	return 0;

free_leaf:
	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
		netdev_err(nic->netdev,
			   "Unable to free leaf bandwidth profile(%d)\n",
			   node->leaf_profile);
	mutex_unlock(&nic->mbox.lock);
	return rc;
}
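
/* Ingress policing on CN10K works by steering the matched flow to a
 * dedicated RQ and attaching a leaf bandwidth profile to that RQ.
 * rq_bmap tracks which RQs are already claimed by police actions
 * (RQ 0 is reserved in otx2_init_tc()), so the number of policed
 * flows is bounded by the number of free receive queues.
 */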
static int otx2_tc_act_set_police(struct otx2_nic *nic,
				  struct otx2_tc_flow *node,
				  struct flow_cls_offload *f,
				  u64 rate, u32 burst, u32 mark,
				  struct npc_install_flow_req *req, bool pps)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct otx2_hw *hw = &nic->hw;
	int rq_idx, rc;

	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
	if (rq_idx >= hw->rx_queues) {
		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
		return -EINVAL;
	}

	req->match_id = mark & 0xFFFFULL;
	req->index = rq_idx;
	req->op = NIX_RX_ACTIONOP_UCAST;

	node->is_act_police = true;
	node->rq = rq_idx;
	node->burst = burst;
	node->rate = rate;
	node->is_pps = pps;

	rc = otx2_tc_act_set_hw_police(nic, node);
	if (!rc)
		set_bit(rq_idx, &nic->rq_bmap);

	return rc;
}

static int otx2_tc_parse_actions(struct otx2_nic *nic,
				 struct flow_action *flow_action,
				 struct npc_install_flow_req *req,
				 struct flow_cls_offload *f,
				 struct otx2_tc_flow *node)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_action_entry *act;
	struct net_device *target;
	struct otx2_nic *priv;
	u32 burst, mark = 0;
	u8 nr_police = 0;
	bool pps = false;
	u64 rate;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
		return -EINVAL;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			req->op = NIX_RX_ACTIONOP_DROP;
			return 0;
		case FLOW_ACTION_ACCEPT:
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_REDIRECT_INGRESS:
			target = act->dev;
			priv = netdev_priv(target);
			/* npc_install_flow_req doesn't support passing a target pcifunc */
			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't redirect to other pf/vf");
				return -EOPNOTSUPP;
			}
			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;

			/* if op is already set, avoid overwriting it */
			if (!req->op)
				req->op = NIX_RX_ACTION_DEFAULT;
			break;

		case FLOW_ACTION_VLAN_POP:
			req->vtag0_valid = true;
			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
			break;
		case FLOW_ACTION_POLICE:
			/* Ingress ratelimiting is not supported on OcteonTx2 */
			if (is_dev_otx2(nic->pdev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Ingress policing not supported on this platform");
				return -EOPNOTSUPP;
			}

			err = otx2_policer_validate(flow_action, act, extack);
			if (err)
				return err;

			if (act->police.rate_bytes_ps > 0) {
				rate = act->police.rate_bytes_ps * 8;
				burst = act->police.burst;
			} else if (act->police.rate_pkt_ps > 0) {
				/* The algorithm used to calculate rate
				 * mantissa, exponent values for a given token
				 * rate (token can be byte or packet) requires
				 * token rate to be multiplied by 8.
				 */
				rate = act->police.rate_pkt_ps * 8;
				burst = act->police.burst_pkt;
				pps = true;
			}
			nr_police++;
			break;
		case FLOW_ACTION_MARK:
			mark = act->mark;
			break;

		case FLOW_ACTION_RX_QUEUE_MAPPING:
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = act->rx_queue;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	if (nr_police > 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "rate limit police offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (nr_police)
		return otx2_tc_act_set_police(nic, node, f, rate, burst,
					      mark, req, pps);

	return 0;
}
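
/* otx2_tc_process_vlan() packs the matched VLAN fields into a 16-bit 802.1Q
 * TCI: PCP in bits 15:13, DEI in bit 12 and VLAN ID in bits 11:0, for both
 * the outer (NPC_OUTER_VID) and inner (NPC_INNER_VID) tags.
 */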
static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec,
				struct flow_msg *flow_mask, struct flow_rule *rule,
				struct npc_install_flow_req *req, bool is_inner)
{
	struct flow_match_vlan match;
	u16 vlan_tci, vlan_tci_mask;

	if (is_inner)
		flow_rule_match_cvlan(rule, &match);
	else
		flow_rule_match_vlan(rule, &match);

	if (!eth_type_vlan(match.key->vlan_tpid)) {
		netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
			   ntohs(match.key->vlan_tpid));
		return -EOPNOTSUPP;
	}

	if (!match.mask->vlan_id) {
		struct flow_action_entry *act;
		int i;

		flow_action_for_each(i, act, &rule->action) {
			if (act->id == FLOW_ACTION_DROP) {
				netdev_err(nic->netdev,
					   "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
					   ntohs(match.key->vlan_tpid), match.key->vlan_id);
				return -EOPNOTSUPP;
			}
		}
	}

	if (match.mask->vlan_id ||
	    match.mask->vlan_dei ||
	    match.mask->vlan_priority) {
		vlan_tci = match.key->vlan_id |
			   match.key->vlan_dei << 12 |
			   match.key->vlan_priority << 13;

		vlan_tci_mask = match.mask->vlan_id |
				match.mask->vlan_dei << 12 |
				match.mask->vlan_priority << 13;
		if (is_inner) {
			flow_spec->vlan_itci = htons(vlan_tci);
			flow_mask->vlan_itci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_INNER_VID);
		} else {
			flow_spec->vlan_tci = htons(vlan_tci);
			flow_mask->vlan_tci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}
	}

	return 0;
}
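
/* Example of a flower rule this parser can offload (TC flower and NTUPLE
 * filters cannot be active at the same time, see
 * otx2_setup_tc_block_ingress_cb()):
 *   tc filter add dev <iface> ingress protocol ip flower skip_sw \
 *           dst_ip 192.168.1.10 ip_proto tcp dst_port 80 action drop
 */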
static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
				struct flow_cls_offload *f,
				struct npc_install_flow_req *req)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_msg *flow_spec = &req->packet;
	struct flow_msg *flow_mask = &req->mask;
	struct flow_dissector *dissector;
	struct flow_rule *rule;
	u8 ip_proto = 0;

	rule = flow_cls_offload_flow_rule(f);
	dissector = rule->match.dissector;

	if ((dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPSEC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
		netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		/* All EtherTypes can be matched, no hw limitation */
		flow_spec->etype = match.key->n_proto;
		flow_mask->etype = match.mask->n_proto;
		req->features |= BIT_ULL(NPC_ETYPE);

		if (match.mask->ip_proto &&
		    (match.key->ip_proto != IPPROTO_TCP &&
		     match.key->ip_proto != IPPROTO_UDP &&
		     match.key->ip_proto != IPPROTO_SCTP &&
		     match.key->ip_proto != IPPROTO_ICMP &&
		     match.key->ip_proto != IPPROTO_ESP &&
		     match.key->ip_proto != IPPROTO_AH &&
		     match.key->ip_proto != IPPROTO_ICMPV6)) {
			netdev_info(nic->netdev,
				    "ip_proto=0x%x not supported\n",
				    match.key->ip_proto);
			return -EOPNOTSUPP;
		}
		if (match.mask->ip_proto)
			ip_proto = match.key->ip_proto;

		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		else if (ip_proto == IPPROTO_ICMP)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
		else if (ip_proto == IPPROTO_ICMPV6)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
		else if (ip_proto == IPPROTO_ESP)
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		else if (ip_proto == IPPROTO_AH)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;
		u32 val;

		flow_rule_match_control(rule, &match);
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
			NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later");
			return -EOPNOTSUPP;
		}

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
			if (ntohs(flow_spec->etype) == ETH_P_IP) {
				flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
				flow_mask->ip_flag = IPV4_FLAG_MORE;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
			} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
				flow_spec->next_header = val ?
							 IPPROTO_FRAGMENT : 0;
				flow_mask->next_header = 0xff;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
			} else {
				NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 or IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		if (!is_zero_ether_addr(match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
			return -EOPNOTSUPP;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
			ether_addr_copy(flow_mask->dmac,
					(u8 *)&match.mask->dst);
			req->features |= BIT_ULL(NPC_DMAC);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) {
		struct flow_match_ipsec match;

		flow_rule_match_ipsec(rule, &match);
		if (!match.mask->spi) {
			NL_SET_ERR_MSG_MOD(extack, "spi index not specified");
			return -EOPNOTSUPP;
		}
		if (ip_proto != IPPROTO_ESP &&
		    ip_proto != IPPROTO_AH) {
			NL_SET_ERR_MSG_MOD(extack,
					   "SPI index is valid only for ESP/AH proto");
			return -EOPNOTSUPP;
		}

		flow_spec->spi = match.key->spi;
		flow_mask->spi = match.mask->spi;
		req->features |= BIT_ULL(NPC_IPSEC_SPI);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
		    match.mask->tos) {
			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
			return -EOPNOTSUPP;
		}
		if (match.mask->ttl) {
			NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
			return -EOPNOTSUPP;
		}
		flow_spec->tos = match.key->tos;
		flow_mask->tos = match.mask->tos;
		req->features |= BIT_ULL(NPC_TOS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		int ret;

		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false);
		if (ret)
			return ret;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		int ret;

		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true);
		if (ret)
			return ret;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);

		flow_spec->ip4dst = match.key->dst;
		flow_mask->ip4dst = match.mask->dst;
		req->features |= BIT_ULL(NPC_DIP_IPV4);

		flow_spec->ip4src = match.key->src;
		flow_mask->ip4src = match.mask->src;
		req->features |= BIT_ULL(NPC_SIP_IPV4);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow matching IPv6 loopback addr not supported");
			return -EOPNOTSUPP;
		}

		if (!ipv6_addr_any(&match.mask->dst)) {
			memcpy(&flow_spec->ip6dst,
			       (struct in6_addr *)&match.key->dst,
			       sizeof(flow_spec->ip6dst));
			memcpy(&flow_mask->ip6dst,
			       (struct in6_addr *)&match.mask->dst,
			       sizeof(flow_spec->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		if (!ipv6_addr_any(&match.mask->src)) {
			memcpy(&flow_spec->ip6src,
			       (struct in6_addr *)&match.key->src,
			       sizeof(flow_spec->ip6src));
			memcpy(&flow_mask->ip6src,
			       (struct in6_addr *)&match.mask->src,
			       sizeof(flow_spec->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);

		flow_spec->dport = match.key->dst;
		flow_mask->dport = match.mask->dst;

		if (flow_mask->dport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}

		flow_spec->sport = match.key->src;
		flow_mask->sport = match.mask->src;

		if (flow_mask->sport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
	}

	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}

static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_tc_flow *iter, *tmp;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
							unsigned long cookie)
{
	struct otx2_tc_flow *tmp;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}

	return NULL;
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
						       int index)
{
	struct otx2_tc_flow *tmp;
	int i = 0;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (i == index)
			return tmp;
		i++;
	}

	return NULL;
}

static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
				       struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&node->list);
			return;
		}
	}
}
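
/* flow_list_tc is kept sorted by tc priority value (lowest value, i.e.
 * highest priority, first). When a rule is inserted or removed, the rules
 * between the head of the list and the affected position are re-installed
 * at adjusted MCAM indices (see otx2_tc_update_mcam_table_*()) so that the
 * hardware match order keeps following the tc priority order.
 */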
static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
				    struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int index = 0;

	/* If the flow list is empty then add the new node */
	if (list_empty(&flow_cfg->flow_list_tc)) {
		list_add(&node->list, &flow_cfg->flow_list_tc);
		return index;
	}

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node->prio < tmp->prio)
			break;
		index++;
	}

	list_add(&node->list, pos->prev);
	return index;
}

static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
{
	struct npc_install_flow_req *tmp_req;
	int err;

	mutex_lock(&nic->mbox.lock);
	tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!tmp_req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}

static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
{
	struct npc_delete_flow_rsp *rsp;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
			   entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	if (cntr_val) {
		rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								      0, &req->hdr);
		if (IS_ERR(rsp)) {
			netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
				   entry);
			mutex_unlock(&nic->mbox.lock);
			return -EFAULT;
		}

		*cntr_val = rsp->cntr_val;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}
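
/* When an already-installed rule is shifted to a new MCAM index, the counter
 * value returned in the delete response is carried over via req.cntr_val on
 * the re-install, so the rule's packet statistics are not lost by the move.
 */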
static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int i = 0, index = 0;
	u16 cntr_val = 0;

	/* Find and delete the entry from the list and re-install
	 * all the entries from beginning to the index of the
	 * deleted entry to higher mcam indexes.
	 */
	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&tmp->list);
			break;
		}

		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
		tmp->entry++;
		tmp->req.entry = tmp->entry;
		tmp->req.cntr_val = cntr_val;
		index++;
	}

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		if (i == index)
			break;

		tmp = list_entry(pos, struct otx2_tc_flow, list);
		otx2_add_mcam_flow_entry(nic, &tmp->req);
		i++;
	}

	return 0;
}

static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
	struct otx2_tc_flow *tmp;
	int list_idx, i;
	u16 cntr_val = 0;

	/* Find the index of the entry(list_idx) whose priority
	 * is greater than the new entry and re-install all
	 * the entries from beginning to list_idx to higher
	 * mcam indexes.
	 */
	list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
	for (i = 0; i < list_idx; i++) {
		tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
		if (!tmp)
			return -ENOMEM;

		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
		tmp->entry = flow_cfg->flow_ent[mcam_idx];
		tmp->req.entry = tmp->entry;
		tmp->req.cntr_val = cntr_val;
		otx2_add_mcam_flow_entry(nic, &tmp->req);
		mcam_idx++;
	}

	return mcam_idx;
}

static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
				     struct otx2_flow_config *flow_cfg,
				     struct otx2_tc_flow *node,
				     bool add_req)
{
	if (add_req)
		return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);

	return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
}

static int otx2_tc_del_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
	if (!flow_node) {
		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			   tc_flow_cmd->cookie);
		return -EINVAL;
	}

	if (flow_node->is_act_police) {
		__clear_bit(flow_node->rq, &nic->rq_bmap);

		if (nic->flags & OTX2_FLAG_INTF_DOWN)
			goto free_mcam_flow;

		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
						 flow_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   flow_node->rq, flow_node->leaf_profile);

		err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   flow_node->leaf_profile);

		mutex_unlock(&nic->mbox.lock);
	}

free_mcam_flow:
	otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
	otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
	kfree_rcu(flow_node, rcu);
	flow_cfg->nr_flows--;
	return 0;
}
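
/* A FLOW_CLS_REPLACE for an already-offloaded cookie first deletes the old
 * rule and then installs the new one, so there is a brief window during a
 * replace where neither rule is present in the MCAM.
 */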
static int otx2_tc_add_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_flow *new_node, *old_node;
	struct npc_install_flow_req *req, dummy;
	int rc, err, mcam_idx;

	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		return -ENOMEM;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (flow_cfg->nr_flows == flow_cfg->max_flows) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Free MCAM entry not available to add the flow");
		return -ENOMEM;
	}

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	spin_lock_init(&new_node->lock);
	new_node->cookie = tc_flow_cmd->cookie;
	new_node->prio = tc_flow_cmd->common.prio;

	memset(&dummy, 0, sizeof(struct npc_install_flow_req));

	rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
	if (rc) {
		kfree_rcu(new_node, rcu);
		return rc;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
	if (old_node)
		otx2_tc_del_flow(nic, tc_flow_cmd);

	mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		rc = -ENOMEM;
		goto free_leaf;
	}

	memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
	memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
	req->channel = nic->hw.rx_chan_base;
	req->entry = flow_cfg->flow_ent[mcam_idx];
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	new_node->entry = req->entry;

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
		mutex_unlock(&nic->mbox.lock);
		goto free_leaf;
	}

	mutex_unlock(&nic->mbox.lock);
	memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req));

	flow_cfg->nr_flows++;
	return 0;

free_leaf:
	otx2_tc_del_from_flow_list(flow_cfg, new_node);
	if (new_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
						 new_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   new_node->rq, new_node->leaf_profile);
		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   new_node->leaf_profile);

		__clear_bit(new_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}
	/* Free the node only after its fields are no longer accessed */
	kfree_rcu(new_node, rcu);

	return rc;
}
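
/* Only packet counts are maintained for tc stats: the MCAM counter is read
 * via the NPC MCAM entry stats mailbox message and the delta since the
 * previous query is reported to the tc core (byte counts are left at zero).
 */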
static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
				  struct flow_cls_offload *tc_flow_cmd)
{
	struct npc_mcam_get_stats_req *req;
	struct npc_mcam_get_stats_rsp *rsp;
	struct otx2_tc_flow_stats *stats;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie);
	if (!flow_node) {
		netdev_info(nic->netdev, "tc flow not found for cookie %lx",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_node->entry;

	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&nic->mbox.lock);
		return PTR_ERR(rsp);
	}

	mutex_unlock(&nic->mbox.lock);

	if (!rsp->stat_ena)
		return -EINVAL;

	stats = &flow_node->stats;

	spin_lock(&flow_node->lock);
	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	stats->pkts = rsp->stat;
	spin_unlock(&flow_node->lock);

	return 0;
}

static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
				    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return otx2_tc_add_flow(nic, cls_flower);
	case FLOW_CLS_DESTROY:
		return otx2_tc_del_flow(nic, cls_flower);
	case FLOW_CLS_STATS:
		return otx2_tc_get_flow_stats(nic, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}
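
/* Ingress MATCHALL policing (CN10K only) is typically configured with
 * something like:
 *   tc qdisc add dev <iface> clsact
 *   tc filter add dev <iface> ingress matchall skip_sw \
 *           action police rate 100mbit burst 32k
 */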
static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
					    struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one ingress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		/* Ingress ratelimiting is not supported on OcteonTx2 */
		if (is_dev_otx2(nic->pdev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Ingress policing not supported on this platform");
			return -EOPNOTSUPP;
		}

		err = cn10k_alloc_matchall_ipolicer(nic);
		if (err)
			return err;

		/* Convert to bits per second */
		rate = entry->police.rate_bytes_ps * 8;
		err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action supported with Ingress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = cn10k_free_matchall_ipolicer(nic);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
	return err;
}

static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_ingress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;
	bool ntuple;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	ntuple = nic->netdev->features & NETIF_F_NTUPLE;
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		if (ntuple) {
			netdev_warn(nic->netdev,
				    "Can't install TC flower offload rule when NTUPLE is active");
			return -EOPNOTSUPP;
		}

		return otx2_setup_tc_cls_flower(nic, type_data);
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_ingress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
					 struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_egress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_egress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static LIST_HEAD(otx2_block_cb_list);

static int otx2_setup_tc_block(struct net_device *netdev,
			       struct flow_block_offload *f)
{
	struct otx2_nic *nic = netdev_priv(netdev);
	flow_setup_cb_t *cb;
	bool ingress;

	if (f->block_shared)
		return -EOPNOTSUPP;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = otx2_setup_tc_block_ingress_cb;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = otx2_setup_tc_block_egress_cb;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
					  nic, nic, ingress);
}

int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return otx2_setup_tc_block(netdev, type_data);
	case TC_SETUP_QDISC_HTB:
		return otx2_setup_tc_htb(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(otx2_setup_tc);

int otx2_init_tc(struct otx2_nic *nic)
{
	/* Exclude receive queue 0 from being used for police action */
	set_bit(0, &nic->rq_bmap);

	if (!nic->flow_cfg) {
		netdev_err(nic->netdev,
			   "Can't init TC, nic->flow_cfg is not setup\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_init_tc);

void otx2_shutdown_tc(struct otx2_nic *nic)
{
	otx2_destroy_tc_flow_list(nic);
}
EXPORT_SYMBOL(otx2_shutdown_tc);

static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
					struct otx2_tc_flow *node)
{
	struct npc_install_flow_req *req;

	if (otx2_tc_act_set_hw_police(nic, node))
		return;

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req)
		goto err;

	memcpy(req, &node->req, sizeof(struct npc_install_flow_req));

	if (otx2_sync_mbox_msg(&nic->mbox))
		netdev_err(nic->netdev,
			   "Failed to install MCAM flow entry for ingress rule");
err:
	mutex_unlock(&nic->mbox.lock);
}

void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_flow *node;

	/* If any ingress policer rules exist for the interface then
	 * apply those rules. Ingress policer rules depend on bandwidth
	 * profiles linked to the receive queues. Since no receive queues
	 * exist when the interface is down, ingress policer rules are stored
	 * and configured in hardware after all receive queues are allocated
	 * in otx2_open.
	 */
	list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
		if (node->is_act_police)
			otx2_tc_config_ingress_rule(nic, node);
	}
}
EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);