1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Ethernet driver 3 * 4 * Copyright (C) 2021 Marvell. 5 * 6 */ 7 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/inetdevice.h> 11 #include <linux/rhashtable.h> 12 #include <linux/bitfield.h> 13 #include <net/flow_dissector.h> 14 #include <net/pkt_cls.h> 15 #include <net/tc_act/tc_gact.h> 16 #include <net/tc_act/tc_mirred.h> 17 #include <net/tc_act/tc_vlan.h> 18 #include <net/ipv6.h> 19 20 #include "cn10k.h" 21 #include "otx2_common.h" 22 #include "qos.h" 23 24 #define CN10K_MAX_BURST_MANTISSA 0x7FFFULL 25 #define CN10K_MAX_BURST_SIZE 8453888ULL 26 27 #define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29) 28 #define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44) 29 30 struct otx2_tc_flow_stats { 31 u64 bytes; 32 u64 pkts; 33 u64 used; 34 }; 35 36 struct otx2_tc_flow { 37 struct list_head list; 38 unsigned long cookie; 39 struct rcu_head rcu; 40 struct otx2_tc_flow_stats stats; 41 spinlock_t lock; /* lock for stats */ 42 u16 rq; 43 u16 entry; 44 u16 leaf_profile; 45 bool is_act_police; 46 u32 prio; 47 struct npc_install_flow_req req; 48 }; 49 50 static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst, 51 u32 *burst_exp, u32 *burst_mantissa) 52 { 53 int max_burst, max_mantissa; 54 unsigned int tmp; 55 56 if (is_dev_otx2(nic->pdev)) { 57 max_burst = MAX_BURST_SIZE; 58 max_mantissa = MAX_BURST_MANTISSA; 59 } else { 60 max_burst = CN10K_MAX_BURST_SIZE; 61 max_mantissa = CN10K_MAX_BURST_MANTISSA; 62 } 63 64 /* Burst is calculated as 65 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256 66 * Max supported burst size is 130,816 bytes. 67 */ 68 burst = min_t(u32, burst, max_burst); 69 if (burst) { 70 *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0; 71 tmp = burst - rounddown_pow_of_two(burst); 72 if (burst < max_mantissa) 73 *burst_mantissa = tmp * 2; 74 else 75 *burst_mantissa = tmp / (1ULL << (*burst_exp - 7)); 76 } else { 77 *burst_exp = MAX_BURST_EXPONENT; 78 *burst_mantissa = max_mantissa; 79 } 80 } 81 82 static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp, 83 u32 *mantissa, u32 *div_exp) 84 { 85 u64 tmp; 86 87 /* Rate calculation by hardware 88 * 89 * PIR_ADD = ((256 + mantissa) << exp) / 256 90 * rate = (2 * PIR_ADD) / ( 1 << div_exp) 91 * The resultant rate is in Mbps. 92 */ 93 94 /* 2Mbps to 100Gbps can be expressed with div_exp = 0. 95 * Setting this to '0' will ease the calculation of 96 * exponent and mantissa. 97 */ 98 *div_exp = 0; 99 100 if (maxrate) { 101 *exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0; 102 tmp = maxrate - rounddown_pow_of_two(maxrate); 103 if (maxrate < MAX_RATE_MANTISSA) 104 *mantissa = tmp * 2; 105 else 106 *mantissa = tmp / (1ULL << (*exp - 7)); 107 } else { 108 /* Instead of disabling rate limiting, set all values to max */ 109 *exp = MAX_RATE_EXPONENT; 110 *mantissa = MAX_RATE_MANTISSA; 111 } 112 } 113 114 u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic, 115 u64 maxrate, u32 burst) 116 { 117 u32 burst_exp, burst_mantissa; 118 u32 exp, mantissa, div_exp; 119 u64 regval = 0; 120 121 /* Get exponent and mantissa values from the desired rate */ 122 otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa); 123 otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp); 124 125 if (is_dev_otx2(nic->pdev)) { 126 regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) | 127 FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) | 128 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | 129 FIELD_PREP(TLX_RATE_EXPONENT, exp) | 130 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); 131 } else { 132 regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) | 133 FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) | 134 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | 135 FIELD_PREP(TLX_RATE_EXPONENT, exp) | 136 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); 137 } 138 139 return regval; 140 } 141 142 static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, 143 u32 burst, u64 maxrate) 144 { 145 struct otx2_hw *hw = &nic->hw; 146 struct nix_txschq_config *req; 147 int txschq, err; 148 149 /* All SQs share the same TL4, so pick the first scheduler */ 150 txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; 151 152 mutex_lock(&nic->mbox.lock); 153 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox); 154 if (!req) { 155 mutex_unlock(&nic->mbox.lock); 156 return -ENOMEM; 157 } 158 159 req->lvl = NIX_TXSCH_LVL_TL4; 160 req->num_regs = 1; 161 req->reg[0] = NIX_AF_TL4X_PIR(txschq); 162 req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst); 163 164 err = otx2_sync_mbox_msg(&nic->mbox); 165 mutex_unlock(&nic->mbox.lock); 166 return err; 167 } 168 169 static int otx2_tc_validate_flow(struct otx2_nic *nic, 170 struct flow_action *actions, 171 struct netlink_ext_ack *extack) 172 { 173 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 174 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 175 return -EINVAL; 176 } 177 178 if (!flow_action_has_entries(actions)) { 179 NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action"); 180 return -EINVAL; 181 } 182 183 if (!flow_offload_has_one_action(actions)) { 184 NL_SET_ERR_MSG_MOD(extack, 185 "Egress MATCHALL offload supports only 1 policing action"); 186 return -EINVAL; 187 } 188 return 0; 189 } 190 191 static int otx2_policer_validate(const struct flow_action *action, 192 const struct flow_action_entry *act, 193 struct netlink_ext_ack *extack) 194 { 195 if (act->police.exceed.act_id != FLOW_ACTION_DROP) { 196 NL_SET_ERR_MSG_MOD(extack, 197 "Offload not supported when exceed action is not drop"); 198 return -EOPNOTSUPP; 199 } 200 201 if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && 202 act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { 203 NL_SET_ERR_MSG_MOD(extack, 204 "Offload not supported when conform action is not pipe or ok"); 205 return -EOPNOTSUPP; 206 } 207 208 if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && 209 !flow_action_is_last_entry(action, act)) { 210 NL_SET_ERR_MSG_MOD(extack, 211 "Offload not supported when conform action is ok, but action is not last"); 212 return -EOPNOTSUPP; 213 } 214 215 if (act->police.peakrate_bytes_ps || 216 act->police.avrate || act->police.overhead) { 217 NL_SET_ERR_MSG_MOD(extack, 218 "Offload not supported when peakrate/avrate/overhead is configured"); 219 return -EOPNOTSUPP; 220 } 221 222 return 0; 223 } 224 225 static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, 226 struct tc_cls_matchall_offload *cls) 227 { 228 struct netlink_ext_ack *extack = cls->common.extack; 229 struct flow_action *actions = &cls->rule->action; 230 struct flow_action_entry *entry; 231 int err; 232 233 err = otx2_tc_validate_flow(nic, actions, extack); 234 if (err) 235 return err; 236 237 if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) { 238 NL_SET_ERR_MSG_MOD(extack, 239 "Only one Egress MATCHALL ratelimiter can be offloaded"); 240 return -ENOMEM; 241 } 242 243 entry = &cls->rule->action.entries[0]; 244 switch (entry->id) { 245 case FLOW_ACTION_POLICE: 246 err = otx2_policer_validate(&cls->rule->action, entry, extack); 247 if (err) 248 return err; 249 250 if (entry->police.rate_pkt_ps) { 251 NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); 252 return -EOPNOTSUPP; 253 } 254 err = otx2_set_matchall_egress_rate(nic, entry->police.burst, 255 otx2_convert_rate(entry->police.rate_bytes_ps)); 256 if (err) 257 return err; 258 nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 259 break; 260 default: 261 NL_SET_ERR_MSG_MOD(extack, 262 "Only police action is supported with Egress MATCHALL offload"); 263 return -EOPNOTSUPP; 264 } 265 266 return 0; 267 } 268 269 static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic, 270 struct tc_cls_matchall_offload *cls) 271 { 272 struct netlink_ext_ack *extack = cls->common.extack; 273 int err; 274 275 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 276 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 277 return -EINVAL; 278 } 279 280 err = otx2_set_matchall_egress_rate(nic, 0, 0); 281 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 282 return err; 283 } 284 285 static int otx2_tc_act_set_police(struct otx2_nic *nic, 286 struct otx2_tc_flow *node, 287 struct flow_cls_offload *f, 288 u64 rate, u32 burst, u32 mark, 289 struct npc_install_flow_req *req, bool pps) 290 { 291 struct netlink_ext_ack *extack = f->common.extack; 292 struct otx2_hw *hw = &nic->hw; 293 int rq_idx, rc; 294 295 rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues); 296 if (rq_idx >= hw->rx_queues) { 297 NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded"); 298 return -EINVAL; 299 } 300 301 mutex_lock(&nic->mbox.lock); 302 303 rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile); 304 if (rc) { 305 mutex_unlock(&nic->mbox.lock); 306 return rc; 307 } 308 309 rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps); 310 if (rc) 311 goto free_leaf; 312 313 rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true); 314 if (rc) 315 goto free_leaf; 316 317 mutex_unlock(&nic->mbox.lock); 318 319 req->match_id = mark & 0xFFFFULL; 320 req->index = rq_idx; 321 req->op = NIX_RX_ACTIONOP_UCAST; 322 set_bit(rq_idx, &nic->rq_bmap); 323 node->is_act_police = true; 324 node->rq = rq_idx; 325 326 return 0; 327 328 free_leaf: 329 if (cn10k_free_leaf_profile(nic, node->leaf_profile)) 330 netdev_err(nic->netdev, 331 "Unable to free leaf bandwidth profile(%d)\n", 332 node->leaf_profile); 333 mutex_unlock(&nic->mbox.lock); 334 return rc; 335 } 336 337 static int otx2_tc_parse_actions(struct otx2_nic *nic, 338 struct flow_action *flow_action, 339 struct npc_install_flow_req *req, 340 struct flow_cls_offload *f, 341 struct otx2_tc_flow *node) 342 { 343 struct netlink_ext_ack *extack = f->common.extack; 344 struct flow_action_entry *act; 345 struct net_device *target; 346 struct otx2_nic *priv; 347 u32 burst, mark = 0; 348 u8 nr_police = 0; 349 bool pps = false; 350 u64 rate; 351 int err; 352 int i; 353 354 if (!flow_action_has_entries(flow_action)) { 355 NL_SET_ERR_MSG_MOD(extack, "no tc actions specified"); 356 return -EINVAL; 357 } 358 359 flow_action_for_each(i, act, flow_action) { 360 switch (act->id) { 361 case FLOW_ACTION_DROP: 362 req->op = NIX_RX_ACTIONOP_DROP; 363 return 0; 364 case FLOW_ACTION_ACCEPT: 365 req->op = NIX_RX_ACTION_DEFAULT; 366 return 0; 367 case FLOW_ACTION_REDIRECT_INGRESS: 368 target = act->dev; 369 priv = netdev_priv(target); 370 /* npc_install_flow_req doesn't support passing a target pcifunc */ 371 if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { 372 NL_SET_ERR_MSG_MOD(extack, 373 "can't redirect to other pf/vf"); 374 return -EOPNOTSUPP; 375 } 376 req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; 377 378 /* if op is already set; avoid overwriting the same */ 379 if (!req->op) 380 req->op = NIX_RX_ACTION_DEFAULT; 381 break; 382 383 case FLOW_ACTION_VLAN_POP: 384 req->vtag0_valid = true; 385 /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */ 386 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; 387 break; 388 case FLOW_ACTION_POLICE: 389 /* Ingress ratelimiting is not supported on OcteonTx2 */ 390 if (is_dev_otx2(nic->pdev)) { 391 NL_SET_ERR_MSG_MOD(extack, 392 "Ingress policing not supported on this platform"); 393 return -EOPNOTSUPP; 394 } 395 396 err = otx2_policer_validate(flow_action, act, extack); 397 if (err) 398 return err; 399 400 if (act->police.rate_bytes_ps > 0) { 401 rate = act->police.rate_bytes_ps * 8; 402 burst = act->police.burst; 403 } else if (act->police.rate_pkt_ps > 0) { 404 /* The algorithm used to calculate rate 405 * mantissa, exponent values for a given token 406 * rate (token can be byte or packet) requires 407 * token rate to be mutiplied by 8. 408 */ 409 rate = act->police.rate_pkt_ps * 8; 410 burst = act->police.burst_pkt; 411 pps = true; 412 } 413 nr_police++; 414 break; 415 case FLOW_ACTION_MARK: 416 mark = act->mark; 417 break; 418 419 case FLOW_ACTION_RX_QUEUE_MAPPING: 420 req->op = NIX_RX_ACTIONOP_UCAST; 421 req->index = act->rx_queue; 422 break; 423 424 default: 425 return -EOPNOTSUPP; 426 } 427 } 428 429 if (nr_police > 1) { 430 NL_SET_ERR_MSG_MOD(extack, 431 "rate limit police offload requires a single action"); 432 return -EOPNOTSUPP; 433 } 434 435 if (nr_police) 436 return otx2_tc_act_set_police(nic, node, f, rate, burst, 437 mark, req, pps); 438 439 return 0; 440 } 441 442 static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, 443 struct flow_cls_offload *f, 444 struct npc_install_flow_req *req) 445 { 446 struct netlink_ext_ack *extack = f->common.extack; 447 struct flow_msg *flow_spec = &req->packet; 448 struct flow_msg *flow_mask = &req->mask; 449 struct flow_dissector *dissector; 450 struct flow_rule *rule; 451 u8 ip_proto = 0; 452 453 rule = flow_cls_offload_flow_rule(f); 454 dissector = rule->match.dissector; 455 456 if ((dissector->used_keys & 457 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | 458 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | 459 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 460 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | 461 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 462 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 463 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | 464 BIT(FLOW_DISSECTOR_KEY_IPSEC) | 465 BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) { 466 netdev_info(nic->netdev, "unsupported flow used key 0x%llx", 467 dissector->used_keys); 468 return -EOPNOTSUPP; 469 } 470 471 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 472 struct flow_match_basic match; 473 474 flow_rule_match_basic(rule, &match); 475 476 /* All EtherTypes can be matched, no hw limitation */ 477 flow_spec->etype = match.key->n_proto; 478 flow_mask->etype = match.mask->n_proto; 479 req->features |= BIT_ULL(NPC_ETYPE); 480 481 if (match.mask->ip_proto && 482 (match.key->ip_proto != IPPROTO_TCP && 483 match.key->ip_proto != IPPROTO_UDP && 484 match.key->ip_proto != IPPROTO_SCTP && 485 match.key->ip_proto != IPPROTO_ICMP && 486 match.key->ip_proto != IPPROTO_ESP && 487 match.key->ip_proto != IPPROTO_AH && 488 match.key->ip_proto != IPPROTO_ICMPV6)) { 489 netdev_info(nic->netdev, 490 "ip_proto=0x%x not supported\n", 491 match.key->ip_proto); 492 return -EOPNOTSUPP; 493 } 494 if (match.mask->ip_proto) 495 ip_proto = match.key->ip_proto; 496 497 if (ip_proto == IPPROTO_UDP) 498 req->features |= BIT_ULL(NPC_IPPROTO_UDP); 499 else if (ip_proto == IPPROTO_TCP) 500 req->features |= BIT_ULL(NPC_IPPROTO_TCP); 501 else if (ip_proto == IPPROTO_SCTP) 502 req->features |= BIT_ULL(NPC_IPPROTO_SCTP); 503 else if (ip_proto == IPPROTO_ICMP) 504 req->features |= BIT_ULL(NPC_IPPROTO_ICMP); 505 else if (ip_proto == IPPROTO_ICMPV6) 506 req->features |= BIT_ULL(NPC_IPPROTO_ICMP6); 507 else if (ip_proto == IPPROTO_ESP) 508 req->features |= BIT_ULL(NPC_IPPROTO_ESP); 509 else if (ip_proto == IPPROTO_AH) 510 req->features |= BIT_ULL(NPC_IPPROTO_AH); 511 } 512 513 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 514 struct flow_match_control match; 515 516 flow_rule_match_control(rule, &match); 517 if (match.mask->flags & FLOW_DIS_FIRST_FRAG) { 518 NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later"); 519 return -EOPNOTSUPP; 520 } 521 522 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { 523 if (ntohs(flow_spec->etype) == ETH_P_IP) { 524 flow_spec->ip_flag = IPV4_FLAG_MORE; 525 flow_mask->ip_flag = IPV4_FLAG_MORE; 526 req->features |= BIT_ULL(NPC_IPFRAG_IPV4); 527 } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) { 528 flow_spec->next_header = IPPROTO_FRAGMENT; 529 flow_mask->next_header = 0xff; 530 req->features |= BIT_ULL(NPC_IPFRAG_IPV6); 531 } else { 532 NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 and IPv6"); 533 return -EOPNOTSUPP; 534 } 535 } 536 } 537 538 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 539 struct flow_match_eth_addrs match; 540 541 flow_rule_match_eth_addrs(rule, &match); 542 if (!is_zero_ether_addr(match.mask->src)) { 543 NL_SET_ERR_MSG_MOD(extack, "src mac match not supported"); 544 return -EOPNOTSUPP; 545 } 546 547 if (!is_zero_ether_addr(match.mask->dst)) { 548 ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst); 549 ether_addr_copy(flow_mask->dmac, 550 (u8 *)&match.mask->dst); 551 req->features |= BIT_ULL(NPC_DMAC); 552 } 553 } 554 555 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) { 556 struct flow_match_ipsec match; 557 558 flow_rule_match_ipsec(rule, &match); 559 if (!match.mask->spi) { 560 NL_SET_ERR_MSG_MOD(extack, "spi index not specified"); 561 return -EOPNOTSUPP; 562 } 563 if (ip_proto != IPPROTO_ESP && 564 ip_proto != IPPROTO_AH) { 565 NL_SET_ERR_MSG_MOD(extack, 566 "SPI index is valid only for ESP/AH proto"); 567 return -EOPNOTSUPP; 568 } 569 570 flow_spec->spi = match.key->spi; 571 flow_mask->spi = match.mask->spi; 572 req->features |= BIT_ULL(NPC_IPSEC_SPI); 573 } 574 575 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { 576 struct flow_match_ip match; 577 578 flow_rule_match_ip(rule, &match); 579 if ((ntohs(flow_spec->etype) != ETH_P_IP) && 580 match.mask->tos) { 581 NL_SET_ERR_MSG_MOD(extack, "tos not supported"); 582 return -EOPNOTSUPP; 583 } 584 if (match.mask->ttl) { 585 NL_SET_ERR_MSG_MOD(extack, "ttl not supported"); 586 return -EOPNOTSUPP; 587 } 588 flow_spec->tos = match.key->tos; 589 flow_mask->tos = match.mask->tos; 590 req->features |= BIT_ULL(NPC_TOS); 591 } 592 593 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 594 struct flow_match_vlan match; 595 u16 vlan_tci, vlan_tci_mask; 596 597 flow_rule_match_vlan(rule, &match); 598 599 if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) { 600 netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n", 601 ntohs(match.key->vlan_tpid)); 602 return -EOPNOTSUPP; 603 } 604 605 if (!match.mask->vlan_id) { 606 struct flow_action_entry *act; 607 int i; 608 609 flow_action_for_each(i, act, &rule->action) { 610 if (act->id == FLOW_ACTION_DROP) { 611 netdev_err(nic->netdev, 612 "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n", 613 ntohs(match.key->vlan_tpid), 614 match.key->vlan_id); 615 return -EOPNOTSUPP; 616 } 617 } 618 } 619 620 if (match.mask->vlan_id || 621 match.mask->vlan_dei || 622 match.mask->vlan_priority) { 623 vlan_tci = match.key->vlan_id | 624 match.key->vlan_dei << 12 | 625 match.key->vlan_priority << 13; 626 627 vlan_tci_mask = match.mask->vlan_id | 628 match.mask->vlan_dei << 12 | 629 match.mask->vlan_priority << 13; 630 631 flow_spec->vlan_tci = htons(vlan_tci); 632 flow_mask->vlan_tci = htons(vlan_tci_mask); 633 req->features |= BIT_ULL(NPC_OUTER_VID); 634 } 635 } 636 637 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { 638 struct flow_match_ipv4_addrs match; 639 640 flow_rule_match_ipv4_addrs(rule, &match); 641 642 flow_spec->ip4dst = match.key->dst; 643 flow_mask->ip4dst = match.mask->dst; 644 req->features |= BIT_ULL(NPC_DIP_IPV4); 645 646 flow_spec->ip4src = match.key->src; 647 flow_mask->ip4src = match.mask->src; 648 req->features |= BIT_ULL(NPC_SIP_IPV4); 649 } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { 650 struct flow_match_ipv6_addrs match; 651 652 flow_rule_match_ipv6_addrs(rule, &match); 653 654 if (ipv6_addr_loopback(&match.key->dst) || 655 ipv6_addr_loopback(&match.key->src)) { 656 NL_SET_ERR_MSG_MOD(extack, 657 "Flow matching IPv6 loopback addr not supported"); 658 return -EOPNOTSUPP; 659 } 660 661 if (!ipv6_addr_any(&match.mask->dst)) { 662 memcpy(&flow_spec->ip6dst, 663 (struct in6_addr *)&match.key->dst, 664 sizeof(flow_spec->ip6dst)); 665 memcpy(&flow_mask->ip6dst, 666 (struct in6_addr *)&match.mask->dst, 667 sizeof(flow_spec->ip6dst)); 668 req->features |= BIT_ULL(NPC_DIP_IPV6); 669 } 670 671 if (!ipv6_addr_any(&match.mask->src)) { 672 memcpy(&flow_spec->ip6src, 673 (struct in6_addr *)&match.key->src, 674 sizeof(flow_spec->ip6src)); 675 memcpy(&flow_mask->ip6src, 676 (struct in6_addr *)&match.mask->src, 677 sizeof(flow_spec->ip6src)); 678 req->features |= BIT_ULL(NPC_SIP_IPV6); 679 } 680 } 681 682 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 683 struct flow_match_ports match; 684 685 flow_rule_match_ports(rule, &match); 686 687 flow_spec->dport = match.key->dst; 688 flow_mask->dport = match.mask->dst; 689 690 if (flow_mask->dport) { 691 if (ip_proto == IPPROTO_UDP) 692 req->features |= BIT_ULL(NPC_DPORT_UDP); 693 else if (ip_proto == IPPROTO_TCP) 694 req->features |= BIT_ULL(NPC_DPORT_TCP); 695 else if (ip_proto == IPPROTO_SCTP) 696 req->features |= BIT_ULL(NPC_DPORT_SCTP); 697 } 698 699 flow_spec->sport = match.key->src; 700 flow_mask->sport = match.mask->src; 701 702 if (flow_mask->sport) { 703 if (ip_proto == IPPROTO_UDP) 704 req->features |= BIT_ULL(NPC_SPORT_UDP); 705 else if (ip_proto == IPPROTO_TCP) 706 req->features |= BIT_ULL(NPC_SPORT_TCP); 707 else if (ip_proto == IPPROTO_SCTP) 708 req->features |= BIT_ULL(NPC_SPORT_SCTP); 709 } 710 } 711 712 return otx2_tc_parse_actions(nic, &rule->action, req, f, node); 713 } 714 715 static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf) 716 { 717 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; 718 struct otx2_tc_flow *iter, *tmp; 719 720 if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) 721 return; 722 723 list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) { 724 list_del(&iter->list); 725 kfree(iter); 726 flow_cfg->nr_flows--; 727 } 728 } 729 730 static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg, 731 unsigned long cookie) 732 { 733 struct otx2_tc_flow *tmp; 734 735 list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) { 736 if (tmp->cookie == cookie) 737 return tmp; 738 } 739 740 return NULL; 741 } 742 743 static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg, 744 int index) 745 { 746 struct otx2_tc_flow *tmp; 747 int i = 0; 748 749 list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) { 750 if (i == index) 751 return tmp; 752 i++; 753 } 754 755 return NULL; 756 } 757 758 static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg, 759 struct otx2_tc_flow *node) 760 { 761 struct list_head *pos, *n; 762 struct otx2_tc_flow *tmp; 763 764 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 765 tmp = list_entry(pos, struct otx2_tc_flow, list); 766 if (node == tmp) { 767 list_del(&node->list); 768 return; 769 } 770 } 771 } 772 773 static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg, 774 struct otx2_tc_flow *node) 775 { 776 struct list_head *pos, *n; 777 struct otx2_tc_flow *tmp; 778 int index = 0; 779 780 /* If the flow list is empty then add the new node */ 781 if (list_empty(&flow_cfg->flow_list_tc)) { 782 list_add(&node->list, &flow_cfg->flow_list_tc); 783 return index; 784 } 785 786 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 787 tmp = list_entry(pos, struct otx2_tc_flow, list); 788 if (node->prio < tmp->prio) 789 break; 790 index++; 791 } 792 793 list_add(&node->list, pos->prev); 794 return index; 795 } 796 797 static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req) 798 { 799 struct npc_install_flow_req *tmp_req; 800 int err; 801 802 mutex_lock(&nic->mbox.lock); 803 tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 804 if (!tmp_req) { 805 mutex_unlock(&nic->mbox.lock); 806 return -ENOMEM; 807 } 808 809 memcpy(tmp_req, req, sizeof(struct npc_install_flow_req)); 810 /* Send message to AF */ 811 err = otx2_sync_mbox_msg(&nic->mbox); 812 if (err) { 813 netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n", 814 req->entry); 815 mutex_unlock(&nic->mbox.lock); 816 return -EFAULT; 817 } 818 819 mutex_unlock(&nic->mbox.lock); 820 return 0; 821 } 822 823 static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val) 824 { 825 struct npc_delete_flow_rsp *rsp; 826 struct npc_delete_flow_req *req; 827 int err; 828 829 mutex_lock(&nic->mbox.lock); 830 req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox); 831 if (!req) { 832 mutex_unlock(&nic->mbox.lock); 833 return -ENOMEM; 834 } 835 836 req->entry = entry; 837 838 /* Send message to AF */ 839 err = otx2_sync_mbox_msg(&nic->mbox); 840 if (err) { 841 netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n", 842 entry); 843 mutex_unlock(&nic->mbox.lock); 844 return -EFAULT; 845 } 846 847 if (cntr_val) { 848 rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox, 849 0, &req->hdr); 850 if (IS_ERR(rsp)) { 851 netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n", 852 entry); 853 mutex_unlock(&nic->mbox.lock); 854 return -EFAULT; 855 } 856 857 *cntr_val = rsp->cntr_val; 858 } 859 860 mutex_unlock(&nic->mbox.lock); 861 return 0; 862 } 863 864 static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic, 865 struct otx2_flow_config *flow_cfg, 866 struct otx2_tc_flow *node) 867 { 868 struct list_head *pos, *n; 869 struct otx2_tc_flow *tmp; 870 int i = 0, index = 0; 871 u16 cntr_val = 0; 872 873 /* Find and delete the entry from the list and re-install 874 * all the entries from beginning to the index of the 875 * deleted entry to higher mcam indexes. 876 */ 877 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 878 tmp = list_entry(pos, struct otx2_tc_flow, list); 879 if (node == tmp) { 880 list_del(&tmp->list); 881 break; 882 } 883 884 otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val); 885 tmp->entry++; 886 tmp->req.entry = tmp->entry; 887 tmp->req.cntr_val = cntr_val; 888 index++; 889 } 890 891 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 892 if (i == index) 893 break; 894 895 tmp = list_entry(pos, struct otx2_tc_flow, list); 896 otx2_add_mcam_flow_entry(nic, &tmp->req); 897 i++; 898 } 899 900 return 0; 901 } 902 903 static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic, 904 struct otx2_flow_config *flow_cfg, 905 struct otx2_tc_flow *node) 906 { 907 int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1; 908 struct otx2_tc_flow *tmp; 909 int list_idx, i; 910 u16 cntr_val = 0; 911 912 /* Find the index of the entry(list_idx) whose priority 913 * is greater than the new entry and re-install all 914 * the entries from beginning to list_idx to higher 915 * mcam indexes. 916 */ 917 list_idx = otx2_tc_add_to_flow_list(flow_cfg, node); 918 for (i = 0; i < list_idx; i++) { 919 tmp = otx2_tc_get_entry_by_index(flow_cfg, i); 920 if (!tmp) 921 return -ENOMEM; 922 923 otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val); 924 tmp->entry = flow_cfg->flow_ent[mcam_idx]; 925 tmp->req.entry = tmp->entry; 926 tmp->req.cntr_val = cntr_val; 927 otx2_add_mcam_flow_entry(nic, &tmp->req); 928 mcam_idx++; 929 } 930 931 return mcam_idx; 932 } 933 934 static int otx2_tc_update_mcam_table(struct otx2_nic *nic, 935 struct otx2_flow_config *flow_cfg, 936 struct otx2_tc_flow *node, 937 bool add_req) 938 { 939 if (add_req) 940 return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node); 941 942 return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node); 943 } 944 945 static int otx2_tc_del_flow(struct otx2_nic *nic, 946 struct flow_cls_offload *tc_flow_cmd) 947 { 948 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 949 struct otx2_tc_flow *flow_node; 950 int err; 951 952 flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); 953 if (!flow_node) { 954 netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n", 955 tc_flow_cmd->cookie); 956 return -EINVAL; 957 } 958 959 if (flow_node->is_act_police) { 960 mutex_lock(&nic->mbox.lock); 961 962 err = cn10k_map_unmap_rq_policer(nic, flow_node->rq, 963 flow_node->leaf_profile, false); 964 if (err) 965 netdev_err(nic->netdev, 966 "Unmapping RQ %d & profile %d failed\n", 967 flow_node->rq, flow_node->leaf_profile); 968 969 err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile); 970 if (err) 971 netdev_err(nic->netdev, 972 "Unable to free leaf bandwidth profile(%d)\n", 973 flow_node->leaf_profile); 974 975 __clear_bit(flow_node->rq, &nic->rq_bmap); 976 977 mutex_unlock(&nic->mbox.lock); 978 } 979 980 otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL); 981 otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false); 982 kfree_rcu(flow_node, rcu); 983 flow_cfg->nr_flows--; 984 return 0; 985 } 986 987 static int otx2_tc_add_flow(struct otx2_nic *nic, 988 struct flow_cls_offload *tc_flow_cmd) 989 { 990 struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; 991 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 992 struct otx2_tc_flow *new_node, *old_node; 993 struct npc_install_flow_req *req, dummy; 994 int rc, err, mcam_idx; 995 996 if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) 997 return -ENOMEM; 998 999 if (flow_cfg->nr_flows == flow_cfg->max_flows) { 1000 NL_SET_ERR_MSG_MOD(extack, 1001 "Free MCAM entry not available to add the flow"); 1002 return -ENOMEM; 1003 } 1004 1005 /* allocate memory for the new flow and it's node */ 1006 new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); 1007 if (!new_node) 1008 return -ENOMEM; 1009 spin_lock_init(&new_node->lock); 1010 new_node->cookie = tc_flow_cmd->cookie; 1011 new_node->prio = tc_flow_cmd->common.prio; 1012 1013 memset(&dummy, 0, sizeof(struct npc_install_flow_req)); 1014 1015 rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy); 1016 if (rc) { 1017 kfree_rcu(new_node, rcu); 1018 return rc; 1019 } 1020 1021 /* If a flow exists with the same cookie, delete it */ 1022 old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); 1023 if (old_node) 1024 otx2_tc_del_flow(nic, tc_flow_cmd); 1025 1026 mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true); 1027 mutex_lock(&nic->mbox.lock); 1028 req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 1029 if (!req) { 1030 mutex_unlock(&nic->mbox.lock); 1031 rc = -ENOMEM; 1032 goto free_leaf; 1033 } 1034 1035 memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr)); 1036 memcpy(req, &dummy, sizeof(struct npc_install_flow_req)); 1037 req->channel = nic->hw.rx_chan_base; 1038 req->entry = flow_cfg->flow_ent[mcam_idx]; 1039 req->intf = NIX_INTF_RX; 1040 req->set_cntr = 1; 1041 new_node->entry = req->entry; 1042 1043 /* Send message to AF */ 1044 rc = otx2_sync_mbox_msg(&nic->mbox); 1045 if (rc) { 1046 NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry"); 1047 mutex_unlock(&nic->mbox.lock); 1048 goto free_leaf; 1049 } 1050 1051 mutex_unlock(&nic->mbox.lock); 1052 memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req)); 1053 1054 flow_cfg->nr_flows++; 1055 return 0; 1056 1057 free_leaf: 1058 otx2_tc_del_from_flow_list(flow_cfg, new_node); 1059 kfree_rcu(new_node, rcu); 1060 if (new_node->is_act_police) { 1061 mutex_lock(&nic->mbox.lock); 1062 1063 err = cn10k_map_unmap_rq_policer(nic, new_node->rq, 1064 new_node->leaf_profile, false); 1065 if (err) 1066 netdev_err(nic->netdev, 1067 "Unmapping RQ %d & profile %d failed\n", 1068 new_node->rq, new_node->leaf_profile); 1069 err = cn10k_free_leaf_profile(nic, new_node->leaf_profile); 1070 if (err) 1071 netdev_err(nic->netdev, 1072 "Unable to free leaf bandwidth profile(%d)\n", 1073 new_node->leaf_profile); 1074 1075 __clear_bit(new_node->rq, &nic->rq_bmap); 1076 1077 mutex_unlock(&nic->mbox.lock); 1078 } 1079 1080 return rc; 1081 } 1082 1083 static int otx2_tc_get_flow_stats(struct otx2_nic *nic, 1084 struct flow_cls_offload *tc_flow_cmd) 1085 { 1086 struct npc_mcam_get_stats_req *req; 1087 struct npc_mcam_get_stats_rsp *rsp; 1088 struct otx2_tc_flow_stats *stats; 1089 struct otx2_tc_flow *flow_node; 1090 int err; 1091 1092 flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie); 1093 if (!flow_node) { 1094 netdev_info(nic->netdev, "tc flow not found for cookie %lx", 1095 tc_flow_cmd->cookie); 1096 return -EINVAL; 1097 } 1098 1099 mutex_lock(&nic->mbox.lock); 1100 1101 req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox); 1102 if (!req) { 1103 mutex_unlock(&nic->mbox.lock); 1104 return -ENOMEM; 1105 } 1106 1107 req->entry = flow_node->entry; 1108 1109 err = otx2_sync_mbox_msg(&nic->mbox); 1110 if (err) { 1111 netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n", 1112 req->entry); 1113 mutex_unlock(&nic->mbox.lock); 1114 return -EFAULT; 1115 } 1116 1117 rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp 1118 (&nic->mbox.mbox, 0, &req->hdr); 1119 if (IS_ERR(rsp)) { 1120 mutex_unlock(&nic->mbox.lock); 1121 return PTR_ERR(rsp); 1122 } 1123 1124 mutex_unlock(&nic->mbox.lock); 1125 1126 if (!rsp->stat_ena) 1127 return -EINVAL; 1128 1129 stats = &flow_node->stats; 1130 1131 spin_lock(&flow_node->lock); 1132 flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0, 1133 FLOW_ACTION_HW_STATS_IMMEDIATE); 1134 stats->pkts = rsp->stat; 1135 spin_unlock(&flow_node->lock); 1136 1137 return 0; 1138 } 1139 1140 static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, 1141 struct flow_cls_offload *cls_flower) 1142 { 1143 switch (cls_flower->command) { 1144 case FLOW_CLS_REPLACE: 1145 return otx2_tc_add_flow(nic, cls_flower); 1146 case FLOW_CLS_DESTROY: 1147 return otx2_tc_del_flow(nic, cls_flower); 1148 case FLOW_CLS_STATS: 1149 return otx2_tc_get_flow_stats(nic, cls_flower); 1150 default: 1151 return -EOPNOTSUPP; 1152 } 1153 } 1154 1155 static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic, 1156 struct tc_cls_matchall_offload *cls) 1157 { 1158 struct netlink_ext_ack *extack = cls->common.extack; 1159 struct flow_action *actions = &cls->rule->action; 1160 struct flow_action_entry *entry; 1161 u64 rate; 1162 int err; 1163 1164 err = otx2_tc_validate_flow(nic, actions, extack); 1165 if (err) 1166 return err; 1167 1168 if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) { 1169 NL_SET_ERR_MSG_MOD(extack, 1170 "Only one ingress MATCHALL ratelimitter can be offloaded"); 1171 return -ENOMEM; 1172 } 1173 1174 entry = &cls->rule->action.entries[0]; 1175 switch (entry->id) { 1176 case FLOW_ACTION_POLICE: 1177 /* Ingress ratelimiting is not supported on OcteonTx2 */ 1178 if (is_dev_otx2(nic->pdev)) { 1179 NL_SET_ERR_MSG_MOD(extack, 1180 "Ingress policing not supported on this platform"); 1181 return -EOPNOTSUPP; 1182 } 1183 1184 err = cn10k_alloc_matchall_ipolicer(nic); 1185 if (err) 1186 return err; 1187 1188 /* Convert to bits per second */ 1189 rate = entry->police.rate_bytes_ps * 8; 1190 err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate); 1191 if (err) 1192 return err; 1193 nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 1194 break; 1195 default: 1196 NL_SET_ERR_MSG_MOD(extack, 1197 "Only police action supported with Ingress MATCHALL offload"); 1198 return -EOPNOTSUPP; 1199 } 1200 1201 return 0; 1202 } 1203 1204 static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic, 1205 struct tc_cls_matchall_offload *cls) 1206 { 1207 struct netlink_ext_ack *extack = cls->common.extack; 1208 int err; 1209 1210 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 1211 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 1212 return -EINVAL; 1213 } 1214 1215 err = cn10k_free_matchall_ipolicer(nic); 1216 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 1217 return err; 1218 } 1219 1220 static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic, 1221 struct tc_cls_matchall_offload *cls_matchall) 1222 { 1223 switch (cls_matchall->command) { 1224 case TC_CLSMATCHALL_REPLACE: 1225 return otx2_tc_ingress_matchall_install(nic, cls_matchall); 1226 case TC_CLSMATCHALL_DESTROY: 1227 return otx2_tc_ingress_matchall_delete(nic, cls_matchall); 1228 case TC_CLSMATCHALL_STATS: 1229 default: 1230 break; 1231 } 1232 1233 return -EOPNOTSUPP; 1234 } 1235 1236 static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type, 1237 void *type_data, void *cb_priv) 1238 { 1239 struct otx2_nic *nic = cb_priv; 1240 bool ntuple; 1241 1242 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 1243 return -EOPNOTSUPP; 1244 1245 ntuple = nic->netdev->features & NETIF_F_NTUPLE; 1246 switch (type) { 1247 case TC_SETUP_CLSFLOWER: 1248 if (ntuple) { 1249 netdev_warn(nic->netdev, 1250 "Can't install TC flower offload rule when NTUPLE is active"); 1251 return -EOPNOTSUPP; 1252 } 1253 1254 return otx2_setup_tc_cls_flower(nic, type_data); 1255 case TC_SETUP_CLSMATCHALL: 1256 return otx2_setup_tc_ingress_matchall(nic, type_data); 1257 default: 1258 break; 1259 } 1260 1261 return -EOPNOTSUPP; 1262 } 1263 1264 static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic, 1265 struct tc_cls_matchall_offload *cls_matchall) 1266 { 1267 switch (cls_matchall->command) { 1268 case TC_CLSMATCHALL_REPLACE: 1269 return otx2_tc_egress_matchall_install(nic, cls_matchall); 1270 case TC_CLSMATCHALL_DESTROY: 1271 return otx2_tc_egress_matchall_delete(nic, cls_matchall); 1272 case TC_CLSMATCHALL_STATS: 1273 default: 1274 break; 1275 } 1276 1277 return -EOPNOTSUPP; 1278 } 1279 1280 static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type, 1281 void *type_data, void *cb_priv) 1282 { 1283 struct otx2_nic *nic = cb_priv; 1284 1285 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 1286 return -EOPNOTSUPP; 1287 1288 switch (type) { 1289 case TC_SETUP_CLSMATCHALL: 1290 return otx2_setup_tc_egress_matchall(nic, type_data); 1291 default: 1292 break; 1293 } 1294 1295 return -EOPNOTSUPP; 1296 } 1297 1298 static LIST_HEAD(otx2_block_cb_list); 1299 1300 static int otx2_setup_tc_block(struct net_device *netdev, 1301 struct flow_block_offload *f) 1302 { 1303 struct otx2_nic *nic = netdev_priv(netdev); 1304 flow_setup_cb_t *cb; 1305 bool ingress; 1306 1307 if (f->block_shared) 1308 return -EOPNOTSUPP; 1309 1310 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { 1311 cb = otx2_setup_tc_block_ingress_cb; 1312 ingress = true; 1313 } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 1314 cb = otx2_setup_tc_block_egress_cb; 1315 ingress = false; 1316 } else { 1317 return -EOPNOTSUPP; 1318 } 1319 1320 return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb, 1321 nic, nic, ingress); 1322 } 1323 1324 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, 1325 void *type_data) 1326 { 1327 switch (type) { 1328 case TC_SETUP_BLOCK: 1329 return otx2_setup_tc_block(netdev, type_data); 1330 case TC_SETUP_QDISC_HTB: 1331 return otx2_setup_tc_htb(netdev, type_data); 1332 default: 1333 return -EOPNOTSUPP; 1334 } 1335 } 1336 EXPORT_SYMBOL(otx2_setup_tc); 1337 1338 int otx2_init_tc(struct otx2_nic *nic) 1339 { 1340 /* Exclude receive queue 0 being used for police action */ 1341 set_bit(0, &nic->rq_bmap); 1342 1343 if (!nic->flow_cfg) { 1344 netdev_err(nic->netdev, 1345 "Can't init TC, nic->flow_cfg is not setup\n"); 1346 return -EINVAL; 1347 } 1348 1349 return 0; 1350 } 1351 EXPORT_SYMBOL(otx2_init_tc); 1352 1353 void otx2_shutdown_tc(struct otx2_nic *nic) 1354 { 1355 otx2_destroy_tc_flow_list(nic); 1356 } 1357 EXPORT_SYMBOL(otx2_shutdown_tc); 1358