1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Ethernet driver 3 * 4 * Copyright (C) 2021 Marvell. 5 * 6 */ 7 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/inetdevice.h> 11 #include <linux/rhashtable.h> 12 #include <linux/bitfield.h> 13 #include <net/flow_dissector.h> 14 #include <net/pkt_cls.h> 15 #include <net/tc_act/tc_gact.h> 16 #include <net/tc_act/tc_mirred.h> 17 #include <net/tc_act/tc_vlan.h> 18 #include <net/ipv6.h> 19 20 #include "cn10k.h" 21 #include "otx2_common.h" 22 #include "qos.h" 23 24 #define CN10K_MAX_BURST_MANTISSA 0x7FFFULL 25 #define CN10K_MAX_BURST_SIZE 8453888ULL 26 27 #define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29) 28 #define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44) 29 30 struct otx2_tc_flow_stats { 31 u64 bytes; 32 u64 pkts; 33 u64 used; 34 }; 35 36 struct otx2_tc_flow { 37 struct list_head list; 38 unsigned long cookie; 39 struct rcu_head rcu; 40 struct otx2_tc_flow_stats stats; 41 spinlock_t lock; /* lock for stats */ 42 u16 rq; 43 u16 entry; 44 u16 leaf_profile; 45 bool is_act_police; 46 u32 prio; 47 struct npc_install_flow_req req; 48 u64 rate; 49 u32 burst; 50 bool is_pps; 51 }; 52 53 static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst, 54 u32 *burst_exp, u32 *burst_mantissa) 55 { 56 int max_burst, max_mantissa; 57 unsigned int tmp; 58 59 if (is_dev_otx2(nic->pdev)) { 60 max_burst = MAX_BURST_SIZE; 61 max_mantissa = MAX_BURST_MANTISSA; 62 } else { 63 max_burst = CN10K_MAX_BURST_SIZE; 64 max_mantissa = CN10K_MAX_BURST_MANTISSA; 65 } 66 67 /* Burst is calculated as 68 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256 69 * Max supported burst size is 130,816 bytes. 70 */ 71 burst = min_t(u32, burst, max_burst); 72 if (burst) { 73 *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0; 74 tmp = burst - rounddown_pow_of_two(burst); 75 if (burst < max_mantissa) 76 *burst_mantissa = tmp * 2; 77 else 78 *burst_mantissa = tmp / (1ULL << (*burst_exp - 7)); 79 } else { 80 *burst_exp = MAX_BURST_EXPONENT; 81 *burst_mantissa = max_mantissa; 82 } 83 } 84 85 static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp, 86 u32 *mantissa, u32 *div_exp) 87 { 88 u64 tmp; 89 90 /* Rate calculation by hardware 91 * 92 * PIR_ADD = ((256 + mantissa) << exp) / 256 93 * rate = (2 * PIR_ADD) / ( 1 << div_exp) 94 * The resultant rate is in Mbps. 95 */ 96 97 /* 2Mbps to 100Gbps can be expressed with div_exp = 0. 98 * Setting this to '0' will ease the calculation of 99 * exponent and mantissa. 100 */ 101 *div_exp = 0; 102 103 if (maxrate) { 104 *exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0; 105 tmp = maxrate - rounddown_pow_of_two(maxrate); 106 if (maxrate < MAX_RATE_MANTISSA) 107 *mantissa = tmp * 2; 108 else 109 *mantissa = tmp / (1ULL << (*exp - 7)); 110 } else { 111 /* Instead of disabling rate limiting, set all values to max */ 112 *exp = MAX_RATE_EXPONENT; 113 *mantissa = MAX_RATE_MANTISSA; 114 } 115 } 116 117 u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic, 118 u64 maxrate, u32 burst) 119 { 120 u32 burst_exp, burst_mantissa; 121 u32 exp, mantissa, div_exp; 122 u64 regval = 0; 123 124 /* Get exponent and mantissa values from the desired rate */ 125 otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa); 126 otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp); 127 128 if (is_dev_otx2(nic->pdev)) { 129 regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) | 130 FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) | 131 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | 132 FIELD_PREP(TLX_RATE_EXPONENT, exp) | 133 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); 134 } else { 135 regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) | 136 FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) | 137 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | 138 FIELD_PREP(TLX_RATE_EXPONENT, exp) | 139 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); 140 } 141 142 return regval; 143 } 144 145 static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, 146 u32 burst, u64 maxrate) 147 { 148 struct otx2_hw *hw = &nic->hw; 149 struct nix_txschq_config *req; 150 int txschq, err; 151 152 /* All SQs share the same TL4, so pick the first scheduler */ 153 txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; 154 155 mutex_lock(&nic->mbox.lock); 156 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox); 157 if (!req) { 158 mutex_unlock(&nic->mbox.lock); 159 return -ENOMEM; 160 } 161 162 req->lvl = NIX_TXSCH_LVL_TL4; 163 req->num_regs = 1; 164 req->reg[0] = NIX_AF_TL4X_PIR(txschq); 165 req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst); 166 167 err = otx2_sync_mbox_msg(&nic->mbox); 168 mutex_unlock(&nic->mbox.lock); 169 return err; 170 } 171 172 static int otx2_tc_validate_flow(struct otx2_nic *nic, 173 struct flow_action *actions, 174 struct netlink_ext_ack *extack) 175 { 176 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 177 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 178 return -EINVAL; 179 } 180 181 if (!flow_action_has_entries(actions)) { 182 NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action"); 183 return -EINVAL; 184 } 185 186 if (!flow_offload_has_one_action(actions)) { 187 NL_SET_ERR_MSG_MOD(extack, 188 "Egress MATCHALL offload supports only 1 policing action"); 189 return -EINVAL; 190 } 191 return 0; 192 } 193 194 static int otx2_policer_validate(const struct flow_action *action, 195 const struct flow_action_entry *act, 196 struct netlink_ext_ack *extack) 197 { 198 if (act->police.exceed.act_id != FLOW_ACTION_DROP) { 199 NL_SET_ERR_MSG_MOD(extack, 200 "Offload not supported when exceed action is not drop"); 201 return -EOPNOTSUPP; 202 } 203 204 if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && 205 act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { 206 NL_SET_ERR_MSG_MOD(extack, 207 "Offload not supported when conform action is not pipe or ok"); 208 return -EOPNOTSUPP; 209 } 210 211 if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && 212 !flow_action_is_last_entry(action, act)) { 213 NL_SET_ERR_MSG_MOD(extack, 214 "Offload not supported when conform action is ok, but action is not last"); 215 return -EOPNOTSUPP; 216 } 217 218 if (act->police.peakrate_bytes_ps || 219 act->police.avrate || act->police.overhead) { 220 NL_SET_ERR_MSG_MOD(extack, 221 "Offload not supported when peakrate/avrate/overhead is configured"); 222 return -EOPNOTSUPP; 223 } 224 225 return 0; 226 } 227 228 static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, 229 struct tc_cls_matchall_offload *cls) 230 { 231 struct netlink_ext_ack *extack = cls->common.extack; 232 struct flow_action *actions = &cls->rule->action; 233 struct flow_action_entry *entry; 234 int err; 235 236 err = otx2_tc_validate_flow(nic, actions, extack); 237 if (err) 238 return err; 239 240 if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) { 241 NL_SET_ERR_MSG_MOD(extack, 242 "Only one Egress MATCHALL ratelimiter can be offloaded"); 243 return -ENOMEM; 244 } 245 246 entry = &cls->rule->action.entries[0]; 247 switch (entry->id) { 248 case FLOW_ACTION_POLICE: 249 err = otx2_policer_validate(&cls->rule->action, entry, extack); 250 if (err) 251 return err; 252 253 if (entry->police.rate_pkt_ps) { 254 NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); 255 return -EOPNOTSUPP; 256 } 257 err = otx2_set_matchall_egress_rate(nic, entry->police.burst, 258 otx2_convert_rate(entry->police.rate_bytes_ps)); 259 if (err) 260 return err; 261 nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 262 break; 263 default: 264 NL_SET_ERR_MSG_MOD(extack, 265 "Only police action is supported with Egress MATCHALL offload"); 266 return -EOPNOTSUPP; 267 } 268 269 return 0; 270 } 271 272 static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic, 273 struct tc_cls_matchall_offload *cls) 274 { 275 struct netlink_ext_ack *extack = cls->common.extack; 276 int err; 277 278 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 279 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 280 return -EINVAL; 281 } 282 283 err = otx2_set_matchall_egress_rate(nic, 0, 0); 284 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 285 return err; 286 } 287 288 static int otx2_tc_act_set_hw_police(struct otx2_nic *nic, 289 struct otx2_tc_flow *node) 290 { 291 int rc; 292 293 mutex_lock(&nic->mbox.lock); 294 295 rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile); 296 if (rc) { 297 mutex_unlock(&nic->mbox.lock); 298 return rc; 299 } 300 301 rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, 302 node->burst, node->rate, node->is_pps); 303 if (rc) 304 goto free_leaf; 305 306 rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true); 307 if (rc) 308 goto free_leaf; 309 310 mutex_unlock(&nic->mbox.lock); 311 312 return 0; 313 314 free_leaf: 315 if (cn10k_free_leaf_profile(nic, node->leaf_profile)) 316 netdev_err(nic->netdev, 317 "Unable to free leaf bandwidth profile(%d)\n", 318 node->leaf_profile); 319 mutex_unlock(&nic->mbox.lock); 320 return rc; 321 } 322 323 static int otx2_tc_act_set_police(struct otx2_nic *nic, 324 struct otx2_tc_flow *node, 325 struct flow_cls_offload *f, 326 u64 rate, u32 burst, u32 mark, 327 struct npc_install_flow_req *req, bool pps) 328 { 329 struct netlink_ext_ack *extack = f->common.extack; 330 struct otx2_hw *hw = &nic->hw; 331 int rq_idx, rc; 332 333 rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues); 334 if (rq_idx >= hw->rx_queues) { 335 NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded"); 336 return -EINVAL; 337 } 338 339 req->match_id = mark & 0xFFFFULL; 340 req->index = rq_idx; 341 req->op = NIX_RX_ACTIONOP_UCAST; 342 343 node->is_act_police = true; 344 node->rq = rq_idx; 345 node->burst = burst; 346 node->rate = rate; 347 node->is_pps = pps; 348 349 rc = otx2_tc_act_set_hw_police(nic, node); 350 if (!rc) 351 set_bit(rq_idx, &nic->rq_bmap); 352 353 return rc; 354 } 355 356 static int otx2_tc_parse_actions(struct otx2_nic *nic, 357 struct flow_action *flow_action, 358 struct npc_install_flow_req *req, 359 struct flow_cls_offload *f, 360 struct otx2_tc_flow *node) 361 { 362 struct netlink_ext_ack *extack = f->common.extack; 363 struct flow_action_entry *act; 364 struct net_device *target; 365 struct otx2_nic *priv; 366 u32 burst, mark = 0; 367 u8 nr_police = 0; 368 bool pps = false; 369 u64 rate; 370 int err; 371 int i; 372 373 if (!flow_action_has_entries(flow_action)) { 374 NL_SET_ERR_MSG_MOD(extack, "no tc actions specified"); 375 return -EINVAL; 376 } 377 378 flow_action_for_each(i, act, flow_action) { 379 switch (act->id) { 380 case FLOW_ACTION_DROP: 381 req->op = NIX_RX_ACTIONOP_DROP; 382 return 0; 383 case FLOW_ACTION_ACCEPT: 384 req->op = NIX_RX_ACTION_DEFAULT; 385 return 0; 386 case FLOW_ACTION_REDIRECT_INGRESS: 387 target = act->dev; 388 priv = netdev_priv(target); 389 /* npc_install_flow_req doesn't support passing a target pcifunc */ 390 if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { 391 NL_SET_ERR_MSG_MOD(extack, 392 "can't redirect to other pf/vf"); 393 return -EOPNOTSUPP; 394 } 395 req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; 396 397 /* if op is already set; avoid overwriting the same */ 398 if (!req->op) 399 req->op = NIX_RX_ACTION_DEFAULT; 400 break; 401 402 case FLOW_ACTION_VLAN_POP: 403 req->vtag0_valid = true; 404 /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */ 405 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; 406 break; 407 case FLOW_ACTION_POLICE: 408 /* Ingress ratelimiting is not supported on OcteonTx2 */ 409 if (is_dev_otx2(nic->pdev)) { 410 NL_SET_ERR_MSG_MOD(extack, 411 "Ingress policing not supported on this platform"); 412 return -EOPNOTSUPP; 413 } 414 415 err = otx2_policer_validate(flow_action, act, extack); 416 if (err) 417 return err; 418 419 if (act->police.rate_bytes_ps > 0) { 420 rate = act->police.rate_bytes_ps * 8; 421 burst = act->police.burst; 422 } else if (act->police.rate_pkt_ps > 0) { 423 /* The algorithm used to calculate rate 424 * mantissa, exponent values for a given token 425 * rate (token can be byte or packet) requires 426 * token rate to be mutiplied by 8. 427 */ 428 rate = act->police.rate_pkt_ps * 8; 429 burst = act->police.burst_pkt; 430 pps = true; 431 } 432 nr_police++; 433 break; 434 case FLOW_ACTION_MARK: 435 mark = act->mark; 436 break; 437 438 case FLOW_ACTION_RX_QUEUE_MAPPING: 439 req->op = NIX_RX_ACTIONOP_UCAST; 440 req->index = act->rx_queue; 441 break; 442 443 default: 444 return -EOPNOTSUPP; 445 } 446 } 447 448 if (nr_police > 1) { 449 NL_SET_ERR_MSG_MOD(extack, 450 "rate limit police offload requires a single action"); 451 return -EOPNOTSUPP; 452 } 453 454 if (nr_police) 455 return otx2_tc_act_set_police(nic, node, f, rate, burst, 456 mark, req, pps); 457 458 return 0; 459 } 460 461 static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec, 462 struct flow_msg *flow_mask, struct flow_rule *rule, 463 struct npc_install_flow_req *req, bool is_inner) 464 { 465 struct flow_match_vlan match; 466 u16 vlan_tci, vlan_tci_mask; 467 468 if (is_inner) 469 flow_rule_match_cvlan(rule, &match); 470 else 471 flow_rule_match_vlan(rule, &match); 472 473 if (!eth_type_vlan(match.key->vlan_tpid)) { 474 netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n", 475 ntohs(match.key->vlan_tpid)); 476 return -EOPNOTSUPP; 477 } 478 479 if (!match.mask->vlan_id) { 480 struct flow_action_entry *act; 481 int i; 482 483 flow_action_for_each(i, act, &rule->action) { 484 if (act->id == FLOW_ACTION_DROP) { 485 netdev_err(nic->netdev, 486 "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n", 487 ntohs(match.key->vlan_tpid), match.key->vlan_id); 488 return -EOPNOTSUPP; 489 } 490 } 491 } 492 493 if (match.mask->vlan_id || 494 match.mask->vlan_dei || 495 match.mask->vlan_priority) { 496 vlan_tci = match.key->vlan_id | 497 match.key->vlan_dei << 12 | 498 match.key->vlan_priority << 13; 499 500 vlan_tci_mask = match.mask->vlan_id | 501 match.mask->vlan_dei << 12 | 502 match.mask->vlan_priority << 13; 503 if (is_inner) { 504 flow_spec->vlan_itci = htons(vlan_tci); 505 flow_mask->vlan_itci = htons(vlan_tci_mask); 506 req->features |= BIT_ULL(NPC_INNER_VID); 507 } else { 508 flow_spec->vlan_tci = htons(vlan_tci); 509 flow_mask->vlan_tci = htons(vlan_tci_mask); 510 req->features |= BIT_ULL(NPC_OUTER_VID); 511 } 512 } 513 514 return 0; 515 } 516 517 static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, 518 struct flow_cls_offload *f, 519 struct npc_install_flow_req *req) 520 { 521 struct netlink_ext_ack *extack = f->common.extack; 522 struct flow_msg *flow_spec = &req->packet; 523 struct flow_msg *flow_mask = &req->mask; 524 struct flow_dissector *dissector; 525 struct flow_rule *rule; 526 u8 ip_proto = 0; 527 528 rule = flow_cls_offload_flow_rule(f); 529 dissector = rule->match.dissector; 530 531 if ((dissector->used_keys & 532 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | 533 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | 534 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 535 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | 536 BIT(FLOW_DISSECTOR_KEY_CVLAN) | 537 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 538 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 539 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | 540 BIT(FLOW_DISSECTOR_KEY_IPSEC) | 541 BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) { 542 netdev_info(nic->netdev, "unsupported flow used key 0x%llx", 543 dissector->used_keys); 544 return -EOPNOTSUPP; 545 } 546 547 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 548 struct flow_match_basic match; 549 550 flow_rule_match_basic(rule, &match); 551 552 /* All EtherTypes can be matched, no hw limitation */ 553 flow_spec->etype = match.key->n_proto; 554 flow_mask->etype = match.mask->n_proto; 555 req->features |= BIT_ULL(NPC_ETYPE); 556 557 if (match.mask->ip_proto && 558 (match.key->ip_proto != IPPROTO_TCP && 559 match.key->ip_proto != IPPROTO_UDP && 560 match.key->ip_proto != IPPROTO_SCTP && 561 match.key->ip_proto != IPPROTO_ICMP && 562 match.key->ip_proto != IPPROTO_ESP && 563 match.key->ip_proto != IPPROTO_AH && 564 match.key->ip_proto != IPPROTO_ICMPV6)) { 565 netdev_info(nic->netdev, 566 "ip_proto=0x%x not supported\n", 567 match.key->ip_proto); 568 return -EOPNOTSUPP; 569 } 570 if (match.mask->ip_proto) 571 ip_proto = match.key->ip_proto; 572 573 if (ip_proto == IPPROTO_UDP) 574 req->features |= BIT_ULL(NPC_IPPROTO_UDP); 575 else if (ip_proto == IPPROTO_TCP) 576 req->features |= BIT_ULL(NPC_IPPROTO_TCP); 577 else if (ip_proto == IPPROTO_SCTP) 578 req->features |= BIT_ULL(NPC_IPPROTO_SCTP); 579 else if (ip_proto == IPPROTO_ICMP) 580 req->features |= BIT_ULL(NPC_IPPROTO_ICMP); 581 else if (ip_proto == IPPROTO_ICMPV6) 582 req->features |= BIT_ULL(NPC_IPPROTO_ICMP6); 583 else if (ip_proto == IPPROTO_ESP) 584 req->features |= BIT_ULL(NPC_IPPROTO_ESP); 585 else if (ip_proto == IPPROTO_AH) 586 req->features |= BIT_ULL(NPC_IPPROTO_AH); 587 } 588 589 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 590 struct flow_match_control match; 591 592 flow_rule_match_control(rule, &match); 593 if (match.mask->flags & FLOW_DIS_FIRST_FRAG) { 594 NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later"); 595 return -EOPNOTSUPP; 596 } 597 598 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { 599 if (ntohs(flow_spec->etype) == ETH_P_IP) { 600 flow_spec->ip_flag = IPV4_FLAG_MORE; 601 flow_mask->ip_flag = IPV4_FLAG_MORE; 602 req->features |= BIT_ULL(NPC_IPFRAG_IPV4); 603 } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) { 604 flow_spec->next_header = IPPROTO_FRAGMENT; 605 flow_mask->next_header = 0xff; 606 req->features |= BIT_ULL(NPC_IPFRAG_IPV6); 607 } else { 608 NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 and IPv6"); 609 return -EOPNOTSUPP; 610 } 611 } 612 } 613 614 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 615 struct flow_match_eth_addrs match; 616 617 flow_rule_match_eth_addrs(rule, &match); 618 if (!is_zero_ether_addr(match.mask->src)) { 619 NL_SET_ERR_MSG_MOD(extack, "src mac match not supported"); 620 return -EOPNOTSUPP; 621 } 622 623 if (!is_zero_ether_addr(match.mask->dst)) { 624 ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst); 625 ether_addr_copy(flow_mask->dmac, 626 (u8 *)&match.mask->dst); 627 req->features |= BIT_ULL(NPC_DMAC); 628 } 629 } 630 631 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) { 632 struct flow_match_ipsec match; 633 634 flow_rule_match_ipsec(rule, &match); 635 if (!match.mask->spi) { 636 NL_SET_ERR_MSG_MOD(extack, "spi index not specified"); 637 return -EOPNOTSUPP; 638 } 639 if (ip_proto != IPPROTO_ESP && 640 ip_proto != IPPROTO_AH) { 641 NL_SET_ERR_MSG_MOD(extack, 642 "SPI index is valid only for ESP/AH proto"); 643 return -EOPNOTSUPP; 644 } 645 646 flow_spec->spi = match.key->spi; 647 flow_mask->spi = match.mask->spi; 648 req->features |= BIT_ULL(NPC_IPSEC_SPI); 649 } 650 651 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { 652 struct flow_match_ip match; 653 654 flow_rule_match_ip(rule, &match); 655 if ((ntohs(flow_spec->etype) != ETH_P_IP) && 656 match.mask->tos) { 657 NL_SET_ERR_MSG_MOD(extack, "tos not supported"); 658 return -EOPNOTSUPP; 659 } 660 if (match.mask->ttl) { 661 NL_SET_ERR_MSG_MOD(extack, "ttl not supported"); 662 return -EOPNOTSUPP; 663 } 664 flow_spec->tos = match.key->tos; 665 flow_mask->tos = match.mask->tos; 666 req->features |= BIT_ULL(NPC_TOS); 667 } 668 669 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 670 int ret; 671 672 ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false); 673 if (ret) 674 return ret; 675 } 676 677 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { 678 int ret; 679 680 ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true); 681 if (ret) 682 return ret; 683 } 684 685 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { 686 struct flow_match_ipv4_addrs match; 687 688 flow_rule_match_ipv4_addrs(rule, &match); 689 690 flow_spec->ip4dst = match.key->dst; 691 flow_mask->ip4dst = match.mask->dst; 692 req->features |= BIT_ULL(NPC_DIP_IPV4); 693 694 flow_spec->ip4src = match.key->src; 695 flow_mask->ip4src = match.mask->src; 696 req->features |= BIT_ULL(NPC_SIP_IPV4); 697 } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { 698 struct flow_match_ipv6_addrs match; 699 700 flow_rule_match_ipv6_addrs(rule, &match); 701 702 if (ipv6_addr_loopback(&match.key->dst) || 703 ipv6_addr_loopback(&match.key->src)) { 704 NL_SET_ERR_MSG_MOD(extack, 705 "Flow matching IPv6 loopback addr not supported"); 706 return -EOPNOTSUPP; 707 } 708 709 if (!ipv6_addr_any(&match.mask->dst)) { 710 memcpy(&flow_spec->ip6dst, 711 (struct in6_addr *)&match.key->dst, 712 sizeof(flow_spec->ip6dst)); 713 memcpy(&flow_mask->ip6dst, 714 (struct in6_addr *)&match.mask->dst, 715 sizeof(flow_spec->ip6dst)); 716 req->features |= BIT_ULL(NPC_DIP_IPV6); 717 } 718 719 if (!ipv6_addr_any(&match.mask->src)) { 720 memcpy(&flow_spec->ip6src, 721 (struct in6_addr *)&match.key->src, 722 sizeof(flow_spec->ip6src)); 723 memcpy(&flow_mask->ip6src, 724 (struct in6_addr *)&match.mask->src, 725 sizeof(flow_spec->ip6src)); 726 req->features |= BIT_ULL(NPC_SIP_IPV6); 727 } 728 } 729 730 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 731 struct flow_match_ports match; 732 733 flow_rule_match_ports(rule, &match); 734 735 flow_spec->dport = match.key->dst; 736 flow_mask->dport = match.mask->dst; 737 738 if (flow_mask->dport) { 739 if (ip_proto == IPPROTO_UDP) 740 req->features |= BIT_ULL(NPC_DPORT_UDP); 741 else if (ip_proto == IPPROTO_TCP) 742 req->features |= BIT_ULL(NPC_DPORT_TCP); 743 else if (ip_proto == IPPROTO_SCTP) 744 req->features |= BIT_ULL(NPC_DPORT_SCTP); 745 } 746 747 flow_spec->sport = match.key->src; 748 flow_mask->sport = match.mask->src; 749 750 if (flow_mask->sport) { 751 if (ip_proto == IPPROTO_UDP) 752 req->features |= BIT_ULL(NPC_SPORT_UDP); 753 else if (ip_proto == IPPROTO_TCP) 754 req->features |= BIT_ULL(NPC_SPORT_TCP); 755 else if (ip_proto == IPPROTO_SCTP) 756 req->features |= BIT_ULL(NPC_SPORT_SCTP); 757 } 758 } 759 760 return otx2_tc_parse_actions(nic, &rule->action, req, f, node); 761 } 762 763 static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf) 764 { 765 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; 766 struct otx2_tc_flow *iter, *tmp; 767 768 if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) 769 return; 770 771 list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) { 772 list_del(&iter->list); 773 kfree(iter); 774 flow_cfg->nr_flows--; 775 } 776 } 777 778 static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg, 779 unsigned long cookie) 780 { 781 struct otx2_tc_flow *tmp; 782 783 list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) { 784 if (tmp->cookie == cookie) 785 return tmp; 786 } 787 788 return NULL; 789 } 790 791 static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg, 792 int index) 793 { 794 struct otx2_tc_flow *tmp; 795 int i = 0; 796 797 list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) { 798 if (i == index) 799 return tmp; 800 i++; 801 } 802 803 return NULL; 804 } 805 806 static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg, 807 struct otx2_tc_flow *node) 808 { 809 struct list_head *pos, *n; 810 struct otx2_tc_flow *tmp; 811 812 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 813 tmp = list_entry(pos, struct otx2_tc_flow, list); 814 if (node == tmp) { 815 list_del(&node->list); 816 return; 817 } 818 } 819 } 820 821 static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg, 822 struct otx2_tc_flow *node) 823 { 824 struct list_head *pos, *n; 825 struct otx2_tc_flow *tmp; 826 int index = 0; 827 828 /* If the flow list is empty then add the new node */ 829 if (list_empty(&flow_cfg->flow_list_tc)) { 830 list_add(&node->list, &flow_cfg->flow_list_tc); 831 return index; 832 } 833 834 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 835 tmp = list_entry(pos, struct otx2_tc_flow, list); 836 if (node->prio < tmp->prio) 837 break; 838 index++; 839 } 840 841 list_add(&node->list, pos->prev); 842 return index; 843 } 844 845 static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req) 846 { 847 struct npc_install_flow_req *tmp_req; 848 int err; 849 850 mutex_lock(&nic->mbox.lock); 851 tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 852 if (!tmp_req) { 853 mutex_unlock(&nic->mbox.lock); 854 return -ENOMEM; 855 } 856 857 memcpy(tmp_req, req, sizeof(struct npc_install_flow_req)); 858 /* Send message to AF */ 859 err = otx2_sync_mbox_msg(&nic->mbox); 860 if (err) { 861 netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n", 862 req->entry); 863 mutex_unlock(&nic->mbox.lock); 864 return -EFAULT; 865 } 866 867 mutex_unlock(&nic->mbox.lock); 868 return 0; 869 } 870 871 static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val) 872 { 873 struct npc_delete_flow_rsp *rsp; 874 struct npc_delete_flow_req *req; 875 int err; 876 877 mutex_lock(&nic->mbox.lock); 878 req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox); 879 if (!req) { 880 mutex_unlock(&nic->mbox.lock); 881 return -ENOMEM; 882 } 883 884 req->entry = entry; 885 886 /* Send message to AF */ 887 err = otx2_sync_mbox_msg(&nic->mbox); 888 if (err) { 889 netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n", 890 entry); 891 mutex_unlock(&nic->mbox.lock); 892 return -EFAULT; 893 } 894 895 if (cntr_val) { 896 rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox, 897 0, &req->hdr); 898 if (IS_ERR(rsp)) { 899 netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n", 900 entry); 901 mutex_unlock(&nic->mbox.lock); 902 return -EFAULT; 903 } 904 905 *cntr_val = rsp->cntr_val; 906 } 907 908 mutex_unlock(&nic->mbox.lock); 909 return 0; 910 } 911 912 static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic, 913 struct otx2_flow_config *flow_cfg, 914 struct otx2_tc_flow *node) 915 { 916 struct list_head *pos, *n; 917 struct otx2_tc_flow *tmp; 918 int i = 0, index = 0; 919 u16 cntr_val = 0; 920 921 /* Find and delete the entry from the list and re-install 922 * all the entries from beginning to the index of the 923 * deleted entry to higher mcam indexes. 924 */ 925 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 926 tmp = list_entry(pos, struct otx2_tc_flow, list); 927 if (node == tmp) { 928 list_del(&tmp->list); 929 break; 930 } 931 932 otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val); 933 tmp->entry++; 934 tmp->req.entry = tmp->entry; 935 tmp->req.cntr_val = cntr_val; 936 index++; 937 } 938 939 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 940 if (i == index) 941 break; 942 943 tmp = list_entry(pos, struct otx2_tc_flow, list); 944 otx2_add_mcam_flow_entry(nic, &tmp->req); 945 i++; 946 } 947 948 return 0; 949 } 950 951 static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic, 952 struct otx2_flow_config *flow_cfg, 953 struct otx2_tc_flow *node) 954 { 955 int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1; 956 struct otx2_tc_flow *tmp; 957 int list_idx, i; 958 u16 cntr_val = 0; 959 960 /* Find the index of the entry(list_idx) whose priority 961 * is greater than the new entry and re-install all 962 * the entries from beginning to list_idx to higher 963 * mcam indexes. 964 */ 965 list_idx = otx2_tc_add_to_flow_list(flow_cfg, node); 966 for (i = 0; i < list_idx; i++) { 967 tmp = otx2_tc_get_entry_by_index(flow_cfg, i); 968 if (!tmp) 969 return -ENOMEM; 970 971 otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val); 972 tmp->entry = flow_cfg->flow_ent[mcam_idx]; 973 tmp->req.entry = tmp->entry; 974 tmp->req.cntr_val = cntr_val; 975 otx2_add_mcam_flow_entry(nic, &tmp->req); 976 mcam_idx++; 977 } 978 979 return mcam_idx; 980 } 981 982 static int otx2_tc_update_mcam_table(struct otx2_nic *nic, 983 struct otx2_flow_config *flow_cfg, 984 struct otx2_tc_flow *node, 985 bool add_req) 986 { 987 if (add_req) 988 return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node); 989 990 return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node); 991 } 992 993 static int otx2_tc_del_flow(struct otx2_nic *nic, 994 struct flow_cls_offload *tc_flow_cmd) 995 { 996 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 997 struct otx2_tc_flow *flow_node; 998 int err; 999 1000 flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); 1001 if (!flow_node) { 1002 netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n", 1003 tc_flow_cmd->cookie); 1004 return -EINVAL; 1005 } 1006 1007 if (flow_node->is_act_police) { 1008 __clear_bit(flow_node->rq, &nic->rq_bmap); 1009 1010 if (nic->flags & OTX2_FLAG_INTF_DOWN) 1011 goto free_mcam_flow; 1012 1013 mutex_lock(&nic->mbox.lock); 1014 1015 err = cn10k_map_unmap_rq_policer(nic, flow_node->rq, 1016 flow_node->leaf_profile, false); 1017 if (err) 1018 netdev_err(nic->netdev, 1019 "Unmapping RQ %d & profile %d failed\n", 1020 flow_node->rq, flow_node->leaf_profile); 1021 1022 err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile); 1023 if (err) 1024 netdev_err(nic->netdev, 1025 "Unable to free leaf bandwidth profile(%d)\n", 1026 flow_node->leaf_profile); 1027 1028 mutex_unlock(&nic->mbox.lock); 1029 } 1030 1031 free_mcam_flow: 1032 otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL); 1033 otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false); 1034 kfree_rcu(flow_node, rcu); 1035 flow_cfg->nr_flows--; 1036 return 0; 1037 } 1038 1039 static int otx2_tc_add_flow(struct otx2_nic *nic, 1040 struct flow_cls_offload *tc_flow_cmd) 1041 { 1042 struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; 1043 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 1044 struct otx2_tc_flow *new_node, *old_node; 1045 struct npc_install_flow_req *req, dummy; 1046 int rc, err, mcam_idx; 1047 1048 if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) 1049 return -ENOMEM; 1050 1051 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 1052 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 1053 return -EINVAL; 1054 } 1055 1056 if (flow_cfg->nr_flows == flow_cfg->max_flows) { 1057 NL_SET_ERR_MSG_MOD(extack, 1058 "Free MCAM entry not available to add the flow"); 1059 return -ENOMEM; 1060 } 1061 1062 /* allocate memory for the new flow and it's node */ 1063 new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); 1064 if (!new_node) 1065 return -ENOMEM; 1066 spin_lock_init(&new_node->lock); 1067 new_node->cookie = tc_flow_cmd->cookie; 1068 new_node->prio = tc_flow_cmd->common.prio; 1069 1070 memset(&dummy, 0, sizeof(struct npc_install_flow_req)); 1071 1072 rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy); 1073 if (rc) { 1074 kfree_rcu(new_node, rcu); 1075 return rc; 1076 } 1077 1078 /* If a flow exists with the same cookie, delete it */ 1079 old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); 1080 if (old_node) 1081 otx2_tc_del_flow(nic, tc_flow_cmd); 1082 1083 mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true); 1084 mutex_lock(&nic->mbox.lock); 1085 req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 1086 if (!req) { 1087 mutex_unlock(&nic->mbox.lock); 1088 rc = -ENOMEM; 1089 goto free_leaf; 1090 } 1091 1092 memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr)); 1093 memcpy(req, &dummy, sizeof(struct npc_install_flow_req)); 1094 req->channel = nic->hw.rx_chan_base; 1095 req->entry = flow_cfg->flow_ent[mcam_idx]; 1096 req->intf = NIX_INTF_RX; 1097 req->set_cntr = 1; 1098 new_node->entry = req->entry; 1099 1100 /* Send message to AF */ 1101 rc = otx2_sync_mbox_msg(&nic->mbox); 1102 if (rc) { 1103 NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry"); 1104 mutex_unlock(&nic->mbox.lock); 1105 goto free_leaf; 1106 } 1107 1108 mutex_unlock(&nic->mbox.lock); 1109 memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req)); 1110 1111 flow_cfg->nr_flows++; 1112 return 0; 1113 1114 free_leaf: 1115 otx2_tc_del_from_flow_list(flow_cfg, new_node); 1116 kfree_rcu(new_node, rcu); 1117 if (new_node->is_act_police) { 1118 mutex_lock(&nic->mbox.lock); 1119 1120 err = cn10k_map_unmap_rq_policer(nic, new_node->rq, 1121 new_node->leaf_profile, false); 1122 if (err) 1123 netdev_err(nic->netdev, 1124 "Unmapping RQ %d & profile %d failed\n", 1125 new_node->rq, new_node->leaf_profile); 1126 err = cn10k_free_leaf_profile(nic, new_node->leaf_profile); 1127 if (err) 1128 netdev_err(nic->netdev, 1129 "Unable to free leaf bandwidth profile(%d)\n", 1130 new_node->leaf_profile); 1131 1132 __clear_bit(new_node->rq, &nic->rq_bmap); 1133 1134 mutex_unlock(&nic->mbox.lock); 1135 } 1136 1137 return rc; 1138 } 1139 1140 static int otx2_tc_get_flow_stats(struct otx2_nic *nic, 1141 struct flow_cls_offload *tc_flow_cmd) 1142 { 1143 struct npc_mcam_get_stats_req *req; 1144 struct npc_mcam_get_stats_rsp *rsp; 1145 struct otx2_tc_flow_stats *stats; 1146 struct otx2_tc_flow *flow_node; 1147 int err; 1148 1149 flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie); 1150 if (!flow_node) { 1151 netdev_info(nic->netdev, "tc flow not found for cookie %lx", 1152 tc_flow_cmd->cookie); 1153 return -EINVAL; 1154 } 1155 1156 mutex_lock(&nic->mbox.lock); 1157 1158 req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox); 1159 if (!req) { 1160 mutex_unlock(&nic->mbox.lock); 1161 return -ENOMEM; 1162 } 1163 1164 req->entry = flow_node->entry; 1165 1166 err = otx2_sync_mbox_msg(&nic->mbox); 1167 if (err) { 1168 netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n", 1169 req->entry); 1170 mutex_unlock(&nic->mbox.lock); 1171 return -EFAULT; 1172 } 1173 1174 rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp 1175 (&nic->mbox.mbox, 0, &req->hdr); 1176 if (IS_ERR(rsp)) { 1177 mutex_unlock(&nic->mbox.lock); 1178 return PTR_ERR(rsp); 1179 } 1180 1181 mutex_unlock(&nic->mbox.lock); 1182 1183 if (!rsp->stat_ena) 1184 return -EINVAL; 1185 1186 stats = &flow_node->stats; 1187 1188 spin_lock(&flow_node->lock); 1189 flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0, 1190 FLOW_ACTION_HW_STATS_IMMEDIATE); 1191 stats->pkts = rsp->stat; 1192 spin_unlock(&flow_node->lock); 1193 1194 return 0; 1195 } 1196 1197 static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, 1198 struct flow_cls_offload *cls_flower) 1199 { 1200 switch (cls_flower->command) { 1201 case FLOW_CLS_REPLACE: 1202 return otx2_tc_add_flow(nic, cls_flower); 1203 case FLOW_CLS_DESTROY: 1204 return otx2_tc_del_flow(nic, cls_flower); 1205 case FLOW_CLS_STATS: 1206 return otx2_tc_get_flow_stats(nic, cls_flower); 1207 default: 1208 return -EOPNOTSUPP; 1209 } 1210 } 1211 1212 static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic, 1213 struct tc_cls_matchall_offload *cls) 1214 { 1215 struct netlink_ext_ack *extack = cls->common.extack; 1216 struct flow_action *actions = &cls->rule->action; 1217 struct flow_action_entry *entry; 1218 u64 rate; 1219 int err; 1220 1221 err = otx2_tc_validate_flow(nic, actions, extack); 1222 if (err) 1223 return err; 1224 1225 if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) { 1226 NL_SET_ERR_MSG_MOD(extack, 1227 "Only one ingress MATCHALL ratelimitter can be offloaded"); 1228 return -ENOMEM; 1229 } 1230 1231 entry = &cls->rule->action.entries[0]; 1232 switch (entry->id) { 1233 case FLOW_ACTION_POLICE: 1234 /* Ingress ratelimiting is not supported on OcteonTx2 */ 1235 if (is_dev_otx2(nic->pdev)) { 1236 NL_SET_ERR_MSG_MOD(extack, 1237 "Ingress policing not supported on this platform"); 1238 return -EOPNOTSUPP; 1239 } 1240 1241 err = cn10k_alloc_matchall_ipolicer(nic); 1242 if (err) 1243 return err; 1244 1245 /* Convert to bits per second */ 1246 rate = entry->police.rate_bytes_ps * 8; 1247 err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate); 1248 if (err) 1249 return err; 1250 nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 1251 break; 1252 default: 1253 NL_SET_ERR_MSG_MOD(extack, 1254 "Only police action supported with Ingress MATCHALL offload"); 1255 return -EOPNOTSUPP; 1256 } 1257 1258 return 0; 1259 } 1260 1261 static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic, 1262 struct tc_cls_matchall_offload *cls) 1263 { 1264 struct netlink_ext_ack *extack = cls->common.extack; 1265 int err; 1266 1267 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 1268 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 1269 return -EINVAL; 1270 } 1271 1272 err = cn10k_free_matchall_ipolicer(nic); 1273 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 1274 return err; 1275 } 1276 1277 static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic, 1278 struct tc_cls_matchall_offload *cls_matchall) 1279 { 1280 switch (cls_matchall->command) { 1281 case TC_CLSMATCHALL_REPLACE: 1282 return otx2_tc_ingress_matchall_install(nic, cls_matchall); 1283 case TC_CLSMATCHALL_DESTROY: 1284 return otx2_tc_ingress_matchall_delete(nic, cls_matchall); 1285 case TC_CLSMATCHALL_STATS: 1286 default: 1287 break; 1288 } 1289 1290 return -EOPNOTSUPP; 1291 } 1292 1293 static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type, 1294 void *type_data, void *cb_priv) 1295 { 1296 struct otx2_nic *nic = cb_priv; 1297 bool ntuple; 1298 1299 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 1300 return -EOPNOTSUPP; 1301 1302 ntuple = nic->netdev->features & NETIF_F_NTUPLE; 1303 switch (type) { 1304 case TC_SETUP_CLSFLOWER: 1305 if (ntuple) { 1306 netdev_warn(nic->netdev, 1307 "Can't install TC flower offload rule when NTUPLE is active"); 1308 return -EOPNOTSUPP; 1309 } 1310 1311 return otx2_setup_tc_cls_flower(nic, type_data); 1312 case TC_SETUP_CLSMATCHALL: 1313 return otx2_setup_tc_ingress_matchall(nic, type_data); 1314 default: 1315 break; 1316 } 1317 1318 return -EOPNOTSUPP; 1319 } 1320 1321 static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic, 1322 struct tc_cls_matchall_offload *cls_matchall) 1323 { 1324 switch (cls_matchall->command) { 1325 case TC_CLSMATCHALL_REPLACE: 1326 return otx2_tc_egress_matchall_install(nic, cls_matchall); 1327 case TC_CLSMATCHALL_DESTROY: 1328 return otx2_tc_egress_matchall_delete(nic, cls_matchall); 1329 case TC_CLSMATCHALL_STATS: 1330 default: 1331 break; 1332 } 1333 1334 return -EOPNOTSUPP; 1335 } 1336 1337 static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type, 1338 void *type_data, void *cb_priv) 1339 { 1340 struct otx2_nic *nic = cb_priv; 1341 1342 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 1343 return -EOPNOTSUPP; 1344 1345 switch (type) { 1346 case TC_SETUP_CLSMATCHALL: 1347 return otx2_setup_tc_egress_matchall(nic, type_data); 1348 default: 1349 break; 1350 } 1351 1352 return -EOPNOTSUPP; 1353 } 1354 1355 static LIST_HEAD(otx2_block_cb_list); 1356 1357 static int otx2_setup_tc_block(struct net_device *netdev, 1358 struct flow_block_offload *f) 1359 { 1360 struct otx2_nic *nic = netdev_priv(netdev); 1361 flow_setup_cb_t *cb; 1362 bool ingress; 1363 1364 if (f->block_shared) 1365 return -EOPNOTSUPP; 1366 1367 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { 1368 cb = otx2_setup_tc_block_ingress_cb; 1369 ingress = true; 1370 } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 1371 cb = otx2_setup_tc_block_egress_cb; 1372 ingress = false; 1373 } else { 1374 return -EOPNOTSUPP; 1375 } 1376 1377 return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb, 1378 nic, nic, ingress); 1379 } 1380 1381 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, 1382 void *type_data) 1383 { 1384 switch (type) { 1385 case TC_SETUP_BLOCK: 1386 return otx2_setup_tc_block(netdev, type_data); 1387 case TC_SETUP_QDISC_HTB: 1388 return otx2_setup_tc_htb(netdev, type_data); 1389 default: 1390 return -EOPNOTSUPP; 1391 } 1392 } 1393 EXPORT_SYMBOL(otx2_setup_tc); 1394 1395 int otx2_init_tc(struct otx2_nic *nic) 1396 { 1397 /* Exclude receive queue 0 being used for police action */ 1398 set_bit(0, &nic->rq_bmap); 1399 1400 if (!nic->flow_cfg) { 1401 netdev_err(nic->netdev, 1402 "Can't init TC, nic->flow_cfg is not setup\n"); 1403 return -EINVAL; 1404 } 1405 1406 return 0; 1407 } 1408 EXPORT_SYMBOL(otx2_init_tc); 1409 1410 void otx2_shutdown_tc(struct otx2_nic *nic) 1411 { 1412 otx2_destroy_tc_flow_list(nic); 1413 } 1414 EXPORT_SYMBOL(otx2_shutdown_tc); 1415 1416 static void otx2_tc_config_ingress_rule(struct otx2_nic *nic, 1417 struct otx2_tc_flow *node) 1418 { 1419 struct npc_install_flow_req *req; 1420 1421 if (otx2_tc_act_set_hw_police(nic, node)) 1422 return; 1423 1424 mutex_lock(&nic->mbox.lock); 1425 1426 req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 1427 if (!req) 1428 goto err; 1429 1430 memcpy(req, &node->req, sizeof(struct npc_install_flow_req)); 1431 1432 if (otx2_sync_mbox_msg(&nic->mbox)) 1433 netdev_err(nic->netdev, 1434 "Failed to install MCAM flow entry for ingress rule"); 1435 err: 1436 mutex_unlock(&nic->mbox.lock); 1437 } 1438 1439 void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic) 1440 { 1441 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 1442 struct otx2_tc_flow *node; 1443 1444 /* If any ingress policer rules exist for the interface then 1445 * apply those rules. Ingress policer rules depend on bandwidth 1446 * profiles linked to the receive queues. Since no receive queues 1447 * exist when interface is down, ingress policer rules are stored 1448 * and configured in hardware after all receive queues are allocated 1449 * in otx2_open. 1450 */ 1451 list_for_each_entry(node, &flow_cfg->flow_list_tc, list) { 1452 if (node->is_act_police) 1453 otx2_tc_config_ingress_rule(nic, node); 1454 } 1455 } 1456 EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules); 1457