1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Ethernet driver 3 * 4 * Copyright (C) 2021 Marvell. 5 * 6 */ 7 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/inetdevice.h> 11 #include <linux/rhashtable.h> 12 #include <linux/bitfield.h> 13 #include <net/flow_dissector.h> 14 #include <net/pkt_cls.h> 15 #include <net/tc_act/tc_gact.h> 16 #include <net/tc_act/tc_mirred.h> 17 #include <net/tc_act/tc_vlan.h> 18 #include <net/ipv6.h> 19 20 #include "cn10k.h" 21 #include "otx2_common.h" 22 #include "qos.h" 23 24 #define CN10K_MAX_BURST_MANTISSA 0x7FFFULL 25 #define CN10K_MAX_BURST_SIZE 8453888ULL 26 27 #define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29) 28 #define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44) 29 30 struct otx2_tc_flow_stats { 31 u64 bytes; 32 u64 pkts; 33 u64 used; 34 }; 35 36 struct otx2_tc_flow { 37 struct list_head list; 38 unsigned long cookie; 39 struct rcu_head rcu; 40 struct otx2_tc_flow_stats stats; 41 spinlock_t lock; /* lock for stats */ 42 u16 rq; 43 u16 entry; 44 u16 leaf_profile; 45 bool is_act_police; 46 u32 prio; 47 struct npc_install_flow_req req; 48 }; 49 50 static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst, 51 u32 *burst_exp, u32 *burst_mantissa) 52 { 53 int max_burst, max_mantissa; 54 unsigned int tmp; 55 56 if (is_dev_otx2(nic->pdev)) { 57 max_burst = MAX_BURST_SIZE; 58 max_mantissa = MAX_BURST_MANTISSA; 59 } else { 60 max_burst = CN10K_MAX_BURST_SIZE; 61 max_mantissa = CN10K_MAX_BURST_MANTISSA; 62 } 63 64 /* Burst is calculated as 65 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256 66 * Max supported burst size is 130,816 bytes. 67 */ 68 burst = min_t(u32, burst, max_burst); 69 if (burst) { 70 *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0; 71 tmp = burst - rounddown_pow_of_two(burst); 72 if (burst < max_mantissa) 73 *burst_mantissa = tmp * 2; 74 else 75 *burst_mantissa = tmp / (1ULL << (*burst_exp - 7)); 76 } else { 77 *burst_exp = MAX_BURST_EXPONENT; 78 *burst_mantissa = max_mantissa; 79 } 80 } 81 82 static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp, 83 u32 *mantissa, u32 *div_exp) 84 { 85 u64 tmp; 86 87 /* Rate calculation by hardware 88 * 89 * PIR_ADD = ((256 + mantissa) << exp) / 256 90 * rate = (2 * PIR_ADD) / ( 1 << div_exp) 91 * The resultant rate is in Mbps. 92 */ 93 94 /* 2Mbps to 100Gbps can be expressed with div_exp = 0. 95 * Setting this to '0' will ease the calculation of 96 * exponent and mantissa. 97 */ 98 *div_exp = 0; 99 100 if (maxrate) { 101 *exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0; 102 tmp = maxrate - rounddown_pow_of_two(maxrate); 103 if (maxrate < MAX_RATE_MANTISSA) 104 *mantissa = tmp * 2; 105 else 106 *mantissa = tmp / (1ULL << (*exp - 7)); 107 } else { 108 /* Instead of disabling rate limiting, set all values to max */ 109 *exp = MAX_RATE_EXPONENT; 110 *mantissa = MAX_RATE_MANTISSA; 111 } 112 } 113 114 u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic, 115 u64 maxrate, u32 burst) 116 { 117 u32 burst_exp, burst_mantissa; 118 u32 exp, mantissa, div_exp; 119 u64 regval = 0; 120 121 /* Get exponent and mantissa values from the desired rate */ 122 otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa); 123 otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp); 124 125 if (is_dev_otx2(nic->pdev)) { 126 regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) | 127 FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) | 128 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | 129 FIELD_PREP(TLX_RATE_EXPONENT, exp) | 130 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); 131 } else { 132 regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) | 133 FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) | 134 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | 135 FIELD_PREP(TLX_RATE_EXPONENT, exp) | 136 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); 137 } 138 139 return regval; 140 } 141 142 static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, 143 u32 burst, u64 maxrate) 144 { 145 struct otx2_hw *hw = &nic->hw; 146 struct nix_txschq_config *req; 147 int txschq, err; 148 149 /* All SQs share the same TL4, so pick the first scheduler */ 150 txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; 151 152 mutex_lock(&nic->mbox.lock); 153 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox); 154 if (!req) { 155 mutex_unlock(&nic->mbox.lock); 156 return -ENOMEM; 157 } 158 159 req->lvl = NIX_TXSCH_LVL_TL4; 160 req->num_regs = 1; 161 req->reg[0] = NIX_AF_TL4X_PIR(txschq); 162 req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst); 163 164 err = otx2_sync_mbox_msg(&nic->mbox); 165 mutex_unlock(&nic->mbox.lock); 166 return err; 167 } 168 169 static int otx2_tc_validate_flow(struct otx2_nic *nic, 170 struct flow_action *actions, 171 struct netlink_ext_ack *extack) 172 { 173 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 174 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 175 return -EINVAL; 176 } 177 178 if (!flow_action_has_entries(actions)) { 179 NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action"); 180 return -EINVAL; 181 } 182 183 if (!flow_offload_has_one_action(actions)) { 184 NL_SET_ERR_MSG_MOD(extack, 185 "Egress MATCHALL offload supports only 1 policing action"); 186 return -EINVAL; 187 } 188 return 0; 189 } 190 191 static int otx2_policer_validate(const struct flow_action *action, 192 const struct flow_action_entry *act, 193 struct netlink_ext_ack *extack) 194 { 195 if (act->police.exceed.act_id != FLOW_ACTION_DROP) { 196 NL_SET_ERR_MSG_MOD(extack, 197 "Offload not supported when exceed action is not drop"); 198 return -EOPNOTSUPP; 199 } 200 201 if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && 202 act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { 203 NL_SET_ERR_MSG_MOD(extack, 204 "Offload not supported when conform action is not pipe or ok"); 205 return -EOPNOTSUPP; 206 } 207 208 if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && 209 !flow_action_is_last_entry(action, act)) { 210 NL_SET_ERR_MSG_MOD(extack, 211 "Offload not supported when conform action is ok, but action is not last"); 212 return -EOPNOTSUPP; 213 } 214 215 if (act->police.peakrate_bytes_ps || 216 act->police.avrate || act->police.overhead) { 217 NL_SET_ERR_MSG_MOD(extack, 218 "Offload not supported when peakrate/avrate/overhead is configured"); 219 return -EOPNOTSUPP; 220 } 221 222 return 0; 223 } 224 225 static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, 226 struct tc_cls_matchall_offload *cls) 227 { 228 struct netlink_ext_ack *extack = cls->common.extack; 229 struct flow_action *actions = &cls->rule->action; 230 struct flow_action_entry *entry; 231 int err; 232 233 err = otx2_tc_validate_flow(nic, actions, extack); 234 if (err) 235 return err; 236 237 if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) { 238 NL_SET_ERR_MSG_MOD(extack, 239 "Only one Egress MATCHALL ratelimiter can be offloaded"); 240 return -ENOMEM; 241 } 242 243 entry = &cls->rule->action.entries[0]; 244 switch (entry->id) { 245 case FLOW_ACTION_POLICE: 246 err = otx2_policer_validate(&cls->rule->action, entry, extack); 247 if (err) 248 return err; 249 250 if (entry->police.rate_pkt_ps) { 251 NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); 252 return -EOPNOTSUPP; 253 } 254 err = otx2_set_matchall_egress_rate(nic, entry->police.burst, 255 otx2_convert_rate(entry->police.rate_bytes_ps)); 256 if (err) 257 return err; 258 nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 259 break; 260 default: 261 NL_SET_ERR_MSG_MOD(extack, 262 "Only police action is supported with Egress MATCHALL offload"); 263 return -EOPNOTSUPP; 264 } 265 266 return 0; 267 } 268 269 static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic, 270 struct tc_cls_matchall_offload *cls) 271 { 272 struct netlink_ext_ack *extack = cls->common.extack; 273 int err; 274 275 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 276 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 277 return -EINVAL; 278 } 279 280 err = otx2_set_matchall_egress_rate(nic, 0, 0); 281 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 282 return err; 283 } 284 285 static int otx2_tc_act_set_police(struct otx2_nic *nic, 286 struct otx2_tc_flow *node, 287 struct flow_cls_offload *f, 288 u64 rate, u32 burst, u32 mark, 289 struct npc_install_flow_req *req, bool pps) 290 { 291 struct netlink_ext_ack *extack = f->common.extack; 292 struct otx2_hw *hw = &nic->hw; 293 int rq_idx, rc; 294 295 rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues); 296 if (rq_idx >= hw->rx_queues) { 297 NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded"); 298 return -EINVAL; 299 } 300 301 mutex_lock(&nic->mbox.lock); 302 303 rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile); 304 if (rc) { 305 mutex_unlock(&nic->mbox.lock); 306 return rc; 307 } 308 309 rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps); 310 if (rc) 311 goto free_leaf; 312 313 rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true); 314 if (rc) 315 goto free_leaf; 316 317 mutex_unlock(&nic->mbox.lock); 318 319 req->match_id = mark & 0xFFFFULL; 320 req->index = rq_idx; 321 req->op = NIX_RX_ACTIONOP_UCAST; 322 set_bit(rq_idx, &nic->rq_bmap); 323 node->is_act_police = true; 324 node->rq = rq_idx; 325 326 return 0; 327 328 free_leaf: 329 if (cn10k_free_leaf_profile(nic, node->leaf_profile)) 330 netdev_err(nic->netdev, 331 "Unable to free leaf bandwidth profile(%d)\n", 332 node->leaf_profile); 333 mutex_unlock(&nic->mbox.lock); 334 return rc; 335 } 336 337 static int otx2_tc_parse_actions(struct otx2_nic *nic, 338 struct flow_action *flow_action, 339 struct npc_install_flow_req *req, 340 struct flow_cls_offload *f, 341 struct otx2_tc_flow *node) 342 { 343 struct netlink_ext_ack *extack = f->common.extack; 344 struct flow_action_entry *act; 345 struct net_device *target; 346 struct otx2_nic *priv; 347 u32 burst, mark = 0; 348 u8 nr_police = 0; 349 bool pps = false; 350 u64 rate; 351 int err; 352 int i; 353 354 if (!flow_action_has_entries(flow_action)) { 355 NL_SET_ERR_MSG_MOD(extack, "no tc actions specified"); 356 return -EINVAL; 357 } 358 359 flow_action_for_each(i, act, flow_action) { 360 switch (act->id) { 361 case FLOW_ACTION_DROP: 362 req->op = NIX_RX_ACTIONOP_DROP; 363 return 0; 364 case FLOW_ACTION_ACCEPT: 365 req->op = NIX_RX_ACTION_DEFAULT; 366 return 0; 367 case FLOW_ACTION_REDIRECT_INGRESS: 368 target = act->dev; 369 priv = netdev_priv(target); 370 /* npc_install_flow_req doesn't support passing a target pcifunc */ 371 if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { 372 NL_SET_ERR_MSG_MOD(extack, 373 "can't redirect to other pf/vf"); 374 return -EOPNOTSUPP; 375 } 376 req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; 377 378 /* if op is already set; avoid overwriting the same */ 379 if (!req->op) 380 req->op = NIX_RX_ACTION_DEFAULT; 381 break; 382 383 case FLOW_ACTION_VLAN_POP: 384 req->vtag0_valid = true; 385 /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */ 386 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; 387 break; 388 case FLOW_ACTION_POLICE: 389 /* Ingress ratelimiting is not supported on OcteonTx2 */ 390 if (is_dev_otx2(nic->pdev)) { 391 NL_SET_ERR_MSG_MOD(extack, 392 "Ingress policing not supported on this platform"); 393 return -EOPNOTSUPP; 394 } 395 396 err = otx2_policer_validate(flow_action, act, extack); 397 if (err) 398 return err; 399 400 if (act->police.rate_bytes_ps > 0) { 401 rate = act->police.rate_bytes_ps * 8; 402 burst = act->police.burst; 403 } else if (act->police.rate_pkt_ps > 0) { 404 /* The algorithm used to calculate rate 405 * mantissa, exponent values for a given token 406 * rate (token can be byte or packet) requires 407 * token rate to be mutiplied by 8. 408 */ 409 rate = act->police.rate_pkt_ps * 8; 410 burst = act->police.burst_pkt; 411 pps = true; 412 } 413 nr_police++; 414 break; 415 case FLOW_ACTION_MARK: 416 mark = act->mark; 417 break; 418 419 case FLOW_ACTION_RX_QUEUE_MAPPING: 420 req->op = NIX_RX_ACTIONOP_UCAST; 421 req->index = act->rx_queue; 422 break; 423 424 default: 425 return -EOPNOTSUPP; 426 } 427 } 428 429 if (nr_police > 1) { 430 NL_SET_ERR_MSG_MOD(extack, 431 "rate limit police offload requires a single action"); 432 return -EOPNOTSUPP; 433 } 434 435 if (nr_police) 436 return otx2_tc_act_set_police(nic, node, f, rate, burst, 437 mark, req, pps); 438 439 return 0; 440 } 441 442 static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, 443 struct flow_cls_offload *f, 444 struct npc_install_flow_req *req) 445 { 446 struct netlink_ext_ack *extack = f->common.extack; 447 struct flow_msg *flow_spec = &req->packet; 448 struct flow_msg *flow_mask = &req->mask; 449 struct flow_dissector *dissector; 450 struct flow_rule *rule; 451 u8 ip_proto = 0; 452 453 rule = flow_cls_offload_flow_rule(f); 454 dissector = rule->match.dissector; 455 456 if ((dissector->used_keys & 457 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | 458 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | 459 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 460 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | 461 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 462 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 463 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | 464 BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) { 465 netdev_info(nic->netdev, "unsupported flow used key 0x%llx", 466 dissector->used_keys); 467 return -EOPNOTSUPP; 468 } 469 470 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 471 struct flow_match_basic match; 472 473 flow_rule_match_basic(rule, &match); 474 475 /* All EtherTypes can be matched, no hw limitation */ 476 flow_spec->etype = match.key->n_proto; 477 flow_mask->etype = match.mask->n_proto; 478 req->features |= BIT_ULL(NPC_ETYPE); 479 480 if (match.mask->ip_proto && 481 (match.key->ip_proto != IPPROTO_TCP && 482 match.key->ip_proto != IPPROTO_UDP && 483 match.key->ip_proto != IPPROTO_SCTP && 484 match.key->ip_proto != IPPROTO_ICMP && 485 match.key->ip_proto != IPPROTO_ICMPV6)) { 486 netdev_info(nic->netdev, 487 "ip_proto=0x%x not supported\n", 488 match.key->ip_proto); 489 return -EOPNOTSUPP; 490 } 491 if (match.mask->ip_proto) 492 ip_proto = match.key->ip_proto; 493 494 if (ip_proto == IPPROTO_UDP) 495 req->features |= BIT_ULL(NPC_IPPROTO_UDP); 496 else if (ip_proto == IPPROTO_TCP) 497 req->features |= BIT_ULL(NPC_IPPROTO_TCP); 498 else if (ip_proto == IPPROTO_SCTP) 499 req->features |= BIT_ULL(NPC_IPPROTO_SCTP); 500 else if (ip_proto == IPPROTO_ICMP) 501 req->features |= BIT_ULL(NPC_IPPROTO_ICMP); 502 else if (ip_proto == IPPROTO_ICMPV6) 503 req->features |= BIT_ULL(NPC_IPPROTO_ICMP6); 504 } 505 506 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 507 struct flow_match_control match; 508 509 flow_rule_match_control(rule, &match); 510 if (match.mask->flags & FLOW_DIS_FIRST_FRAG) { 511 NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later"); 512 return -EOPNOTSUPP; 513 } 514 515 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { 516 if (ntohs(flow_spec->etype) == ETH_P_IP) { 517 flow_spec->ip_flag = IPV4_FLAG_MORE; 518 flow_mask->ip_flag = IPV4_FLAG_MORE; 519 req->features |= BIT_ULL(NPC_IPFRAG_IPV4); 520 } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) { 521 flow_spec->next_header = IPPROTO_FRAGMENT; 522 flow_mask->next_header = 0xff; 523 req->features |= BIT_ULL(NPC_IPFRAG_IPV6); 524 } else { 525 NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 and IPv6"); 526 return -EOPNOTSUPP; 527 } 528 } 529 } 530 531 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 532 struct flow_match_eth_addrs match; 533 534 flow_rule_match_eth_addrs(rule, &match); 535 if (!is_zero_ether_addr(match.mask->src)) { 536 NL_SET_ERR_MSG_MOD(extack, "src mac match not supported"); 537 return -EOPNOTSUPP; 538 } 539 540 if (!is_zero_ether_addr(match.mask->dst)) { 541 ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst); 542 ether_addr_copy(flow_mask->dmac, 543 (u8 *)&match.mask->dst); 544 req->features |= BIT_ULL(NPC_DMAC); 545 } 546 } 547 548 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { 549 struct flow_match_ip match; 550 551 flow_rule_match_ip(rule, &match); 552 if ((ntohs(flow_spec->etype) != ETH_P_IP) && 553 match.mask->tos) { 554 NL_SET_ERR_MSG_MOD(extack, "tos not supported"); 555 return -EOPNOTSUPP; 556 } 557 if (match.mask->ttl) { 558 NL_SET_ERR_MSG_MOD(extack, "ttl not supported"); 559 return -EOPNOTSUPP; 560 } 561 flow_spec->tos = match.key->tos; 562 flow_mask->tos = match.mask->tos; 563 req->features |= BIT_ULL(NPC_TOS); 564 } 565 566 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 567 struct flow_match_vlan match; 568 u16 vlan_tci, vlan_tci_mask; 569 570 flow_rule_match_vlan(rule, &match); 571 572 if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) { 573 netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n", 574 ntohs(match.key->vlan_tpid)); 575 return -EOPNOTSUPP; 576 } 577 578 if (!match.mask->vlan_id) { 579 struct flow_action_entry *act; 580 int i; 581 582 flow_action_for_each(i, act, &rule->action) { 583 if (act->id == FLOW_ACTION_DROP) { 584 netdev_err(nic->netdev, 585 "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n", 586 ntohs(match.key->vlan_tpid), 587 match.key->vlan_id); 588 return -EOPNOTSUPP; 589 } 590 } 591 } 592 593 if (match.mask->vlan_id || 594 match.mask->vlan_dei || 595 match.mask->vlan_priority) { 596 vlan_tci = match.key->vlan_id | 597 match.key->vlan_dei << 12 | 598 match.key->vlan_priority << 13; 599 600 vlan_tci_mask = match.mask->vlan_id | 601 match.mask->vlan_dei << 12 | 602 match.mask->vlan_priority << 13; 603 604 flow_spec->vlan_tci = htons(vlan_tci); 605 flow_mask->vlan_tci = htons(vlan_tci_mask); 606 req->features |= BIT_ULL(NPC_OUTER_VID); 607 } 608 } 609 610 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { 611 struct flow_match_ipv4_addrs match; 612 613 flow_rule_match_ipv4_addrs(rule, &match); 614 615 flow_spec->ip4dst = match.key->dst; 616 flow_mask->ip4dst = match.mask->dst; 617 req->features |= BIT_ULL(NPC_DIP_IPV4); 618 619 flow_spec->ip4src = match.key->src; 620 flow_mask->ip4src = match.mask->src; 621 req->features |= BIT_ULL(NPC_SIP_IPV4); 622 } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { 623 struct flow_match_ipv6_addrs match; 624 625 flow_rule_match_ipv6_addrs(rule, &match); 626 627 if (ipv6_addr_loopback(&match.key->dst) || 628 ipv6_addr_loopback(&match.key->src)) { 629 NL_SET_ERR_MSG_MOD(extack, 630 "Flow matching IPv6 loopback addr not supported"); 631 return -EOPNOTSUPP; 632 } 633 634 if (!ipv6_addr_any(&match.mask->dst)) { 635 memcpy(&flow_spec->ip6dst, 636 (struct in6_addr *)&match.key->dst, 637 sizeof(flow_spec->ip6dst)); 638 memcpy(&flow_mask->ip6dst, 639 (struct in6_addr *)&match.mask->dst, 640 sizeof(flow_spec->ip6dst)); 641 req->features |= BIT_ULL(NPC_DIP_IPV6); 642 } 643 644 if (!ipv6_addr_any(&match.mask->src)) { 645 memcpy(&flow_spec->ip6src, 646 (struct in6_addr *)&match.key->src, 647 sizeof(flow_spec->ip6src)); 648 memcpy(&flow_mask->ip6src, 649 (struct in6_addr *)&match.mask->src, 650 sizeof(flow_spec->ip6src)); 651 req->features |= BIT_ULL(NPC_SIP_IPV6); 652 } 653 } 654 655 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 656 struct flow_match_ports match; 657 658 flow_rule_match_ports(rule, &match); 659 660 flow_spec->dport = match.key->dst; 661 flow_mask->dport = match.mask->dst; 662 663 if (flow_mask->dport) { 664 if (ip_proto == IPPROTO_UDP) 665 req->features |= BIT_ULL(NPC_DPORT_UDP); 666 else if (ip_proto == IPPROTO_TCP) 667 req->features |= BIT_ULL(NPC_DPORT_TCP); 668 else if (ip_proto == IPPROTO_SCTP) 669 req->features |= BIT_ULL(NPC_DPORT_SCTP); 670 } 671 672 flow_spec->sport = match.key->src; 673 flow_mask->sport = match.mask->src; 674 675 if (flow_mask->sport) { 676 if (ip_proto == IPPROTO_UDP) 677 req->features |= BIT_ULL(NPC_SPORT_UDP); 678 else if (ip_proto == IPPROTO_TCP) 679 req->features |= BIT_ULL(NPC_SPORT_TCP); 680 else if (ip_proto == IPPROTO_SCTP) 681 req->features |= BIT_ULL(NPC_SPORT_SCTP); 682 } 683 } 684 685 return otx2_tc_parse_actions(nic, &rule->action, req, f, node); 686 } 687 688 static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf) 689 { 690 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; 691 struct otx2_tc_flow *iter, *tmp; 692 693 if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) 694 return; 695 696 list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) { 697 list_del(&iter->list); 698 kfree(iter); 699 flow_cfg->nr_flows--; 700 } 701 } 702 703 static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg, 704 unsigned long cookie) 705 { 706 struct otx2_tc_flow *tmp; 707 708 list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) { 709 if (tmp->cookie == cookie) 710 return tmp; 711 } 712 713 return NULL; 714 } 715 716 static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg, 717 int index) 718 { 719 struct otx2_tc_flow *tmp; 720 int i = 0; 721 722 list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) { 723 if (i == index) 724 return tmp; 725 i++; 726 } 727 728 return NULL; 729 } 730 731 static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg, 732 struct otx2_tc_flow *node) 733 { 734 struct list_head *pos, *n; 735 struct otx2_tc_flow *tmp; 736 737 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 738 tmp = list_entry(pos, struct otx2_tc_flow, list); 739 if (node == tmp) { 740 list_del(&node->list); 741 return; 742 } 743 } 744 } 745 746 static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg, 747 struct otx2_tc_flow *node) 748 { 749 struct list_head *pos, *n; 750 struct otx2_tc_flow *tmp; 751 int index = 0; 752 753 /* If the flow list is empty then add the new node */ 754 if (list_empty(&flow_cfg->flow_list_tc)) { 755 list_add(&node->list, &flow_cfg->flow_list_tc); 756 return index; 757 } 758 759 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 760 tmp = list_entry(pos, struct otx2_tc_flow, list); 761 if (node->prio < tmp->prio) 762 break; 763 index++; 764 } 765 766 list_add(&node->list, pos->prev); 767 return index; 768 } 769 770 static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req) 771 { 772 struct npc_install_flow_req *tmp_req; 773 int err; 774 775 mutex_lock(&nic->mbox.lock); 776 tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 777 if (!tmp_req) { 778 mutex_unlock(&nic->mbox.lock); 779 return -ENOMEM; 780 } 781 782 memcpy(tmp_req, req, sizeof(struct npc_install_flow_req)); 783 /* Send message to AF */ 784 err = otx2_sync_mbox_msg(&nic->mbox); 785 if (err) { 786 netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n", 787 req->entry); 788 mutex_unlock(&nic->mbox.lock); 789 return -EFAULT; 790 } 791 792 mutex_unlock(&nic->mbox.lock); 793 return 0; 794 } 795 796 static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val) 797 { 798 struct npc_delete_flow_rsp *rsp; 799 struct npc_delete_flow_req *req; 800 int err; 801 802 mutex_lock(&nic->mbox.lock); 803 req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox); 804 if (!req) { 805 mutex_unlock(&nic->mbox.lock); 806 return -ENOMEM; 807 } 808 809 req->entry = entry; 810 811 /* Send message to AF */ 812 err = otx2_sync_mbox_msg(&nic->mbox); 813 if (err) { 814 netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n", 815 entry); 816 mutex_unlock(&nic->mbox.lock); 817 return -EFAULT; 818 } 819 820 if (cntr_val) { 821 rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox, 822 0, &req->hdr); 823 if (IS_ERR(rsp)) { 824 netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n", 825 entry); 826 mutex_unlock(&nic->mbox.lock); 827 return -EFAULT; 828 } 829 830 *cntr_val = rsp->cntr_val; 831 } 832 833 mutex_unlock(&nic->mbox.lock); 834 return 0; 835 } 836 837 static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic, 838 struct otx2_flow_config *flow_cfg, 839 struct otx2_tc_flow *node) 840 { 841 struct list_head *pos, *n; 842 struct otx2_tc_flow *tmp; 843 int i = 0, index = 0; 844 u16 cntr_val = 0; 845 846 /* Find and delete the entry from the list and re-install 847 * all the entries from beginning to the index of the 848 * deleted entry to higher mcam indexes. 849 */ 850 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 851 tmp = list_entry(pos, struct otx2_tc_flow, list); 852 if (node == tmp) { 853 list_del(&tmp->list); 854 break; 855 } 856 857 otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val); 858 tmp->entry++; 859 tmp->req.entry = tmp->entry; 860 tmp->req.cntr_val = cntr_val; 861 index++; 862 } 863 864 list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) { 865 if (i == index) 866 break; 867 868 tmp = list_entry(pos, struct otx2_tc_flow, list); 869 otx2_add_mcam_flow_entry(nic, &tmp->req); 870 i++; 871 } 872 873 return 0; 874 } 875 876 static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic, 877 struct otx2_flow_config *flow_cfg, 878 struct otx2_tc_flow *node) 879 { 880 int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1; 881 struct otx2_tc_flow *tmp; 882 int list_idx, i; 883 u16 cntr_val = 0; 884 885 /* Find the index of the entry(list_idx) whose priority 886 * is greater than the new entry and re-install all 887 * the entries from beginning to list_idx to higher 888 * mcam indexes. 889 */ 890 list_idx = otx2_tc_add_to_flow_list(flow_cfg, node); 891 for (i = 0; i < list_idx; i++) { 892 tmp = otx2_tc_get_entry_by_index(flow_cfg, i); 893 if (!tmp) 894 return -ENOMEM; 895 896 otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val); 897 tmp->entry = flow_cfg->flow_ent[mcam_idx]; 898 tmp->req.entry = tmp->entry; 899 tmp->req.cntr_val = cntr_val; 900 otx2_add_mcam_flow_entry(nic, &tmp->req); 901 mcam_idx++; 902 } 903 904 return mcam_idx; 905 } 906 907 static int otx2_tc_update_mcam_table(struct otx2_nic *nic, 908 struct otx2_flow_config *flow_cfg, 909 struct otx2_tc_flow *node, 910 bool add_req) 911 { 912 if (add_req) 913 return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node); 914 915 return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node); 916 } 917 918 static int otx2_tc_del_flow(struct otx2_nic *nic, 919 struct flow_cls_offload *tc_flow_cmd) 920 { 921 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 922 struct otx2_tc_flow *flow_node; 923 int err; 924 925 flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); 926 if (!flow_node) { 927 netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n", 928 tc_flow_cmd->cookie); 929 return -EINVAL; 930 } 931 932 if (flow_node->is_act_police) { 933 mutex_lock(&nic->mbox.lock); 934 935 err = cn10k_map_unmap_rq_policer(nic, flow_node->rq, 936 flow_node->leaf_profile, false); 937 if (err) 938 netdev_err(nic->netdev, 939 "Unmapping RQ %d & profile %d failed\n", 940 flow_node->rq, flow_node->leaf_profile); 941 942 err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile); 943 if (err) 944 netdev_err(nic->netdev, 945 "Unable to free leaf bandwidth profile(%d)\n", 946 flow_node->leaf_profile); 947 948 __clear_bit(flow_node->rq, &nic->rq_bmap); 949 950 mutex_unlock(&nic->mbox.lock); 951 } 952 953 otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL); 954 otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false); 955 kfree_rcu(flow_node, rcu); 956 flow_cfg->nr_flows--; 957 return 0; 958 } 959 960 static int otx2_tc_add_flow(struct otx2_nic *nic, 961 struct flow_cls_offload *tc_flow_cmd) 962 { 963 struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; 964 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 965 struct otx2_tc_flow *new_node, *old_node; 966 struct npc_install_flow_req *req, dummy; 967 int rc, err, mcam_idx; 968 969 if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) 970 return -ENOMEM; 971 972 if (flow_cfg->nr_flows == flow_cfg->max_flows) { 973 NL_SET_ERR_MSG_MOD(extack, 974 "Free MCAM entry not available to add the flow"); 975 return -ENOMEM; 976 } 977 978 /* allocate memory for the new flow and it's node */ 979 new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); 980 if (!new_node) 981 return -ENOMEM; 982 spin_lock_init(&new_node->lock); 983 new_node->cookie = tc_flow_cmd->cookie; 984 new_node->prio = tc_flow_cmd->common.prio; 985 986 memset(&dummy, 0, sizeof(struct npc_install_flow_req)); 987 988 rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy); 989 if (rc) { 990 kfree_rcu(new_node, rcu); 991 return rc; 992 } 993 994 /* If a flow exists with the same cookie, delete it */ 995 old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); 996 if (old_node) 997 otx2_tc_del_flow(nic, tc_flow_cmd); 998 999 mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true); 1000 mutex_lock(&nic->mbox.lock); 1001 req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 1002 if (!req) { 1003 mutex_unlock(&nic->mbox.lock); 1004 rc = -ENOMEM; 1005 goto free_leaf; 1006 } 1007 1008 memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr)); 1009 memcpy(req, &dummy, sizeof(struct npc_install_flow_req)); 1010 req->channel = nic->hw.rx_chan_base; 1011 req->entry = flow_cfg->flow_ent[mcam_idx]; 1012 req->intf = NIX_INTF_RX; 1013 req->set_cntr = 1; 1014 new_node->entry = req->entry; 1015 1016 /* Send message to AF */ 1017 rc = otx2_sync_mbox_msg(&nic->mbox); 1018 if (rc) { 1019 NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry"); 1020 mutex_unlock(&nic->mbox.lock); 1021 goto free_leaf; 1022 } 1023 1024 mutex_unlock(&nic->mbox.lock); 1025 memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req)); 1026 1027 flow_cfg->nr_flows++; 1028 return 0; 1029 1030 free_leaf: 1031 otx2_tc_del_from_flow_list(flow_cfg, new_node); 1032 kfree_rcu(new_node, rcu); 1033 if (new_node->is_act_police) { 1034 mutex_lock(&nic->mbox.lock); 1035 1036 err = cn10k_map_unmap_rq_policer(nic, new_node->rq, 1037 new_node->leaf_profile, false); 1038 if (err) 1039 netdev_err(nic->netdev, 1040 "Unmapping RQ %d & profile %d failed\n", 1041 new_node->rq, new_node->leaf_profile); 1042 err = cn10k_free_leaf_profile(nic, new_node->leaf_profile); 1043 if (err) 1044 netdev_err(nic->netdev, 1045 "Unable to free leaf bandwidth profile(%d)\n", 1046 new_node->leaf_profile); 1047 1048 __clear_bit(new_node->rq, &nic->rq_bmap); 1049 1050 mutex_unlock(&nic->mbox.lock); 1051 } 1052 1053 return rc; 1054 } 1055 1056 static int otx2_tc_get_flow_stats(struct otx2_nic *nic, 1057 struct flow_cls_offload *tc_flow_cmd) 1058 { 1059 struct npc_mcam_get_stats_req *req; 1060 struct npc_mcam_get_stats_rsp *rsp; 1061 struct otx2_tc_flow_stats *stats; 1062 struct otx2_tc_flow *flow_node; 1063 int err; 1064 1065 flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie); 1066 if (!flow_node) { 1067 netdev_info(nic->netdev, "tc flow not found for cookie %lx", 1068 tc_flow_cmd->cookie); 1069 return -EINVAL; 1070 } 1071 1072 mutex_lock(&nic->mbox.lock); 1073 1074 req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox); 1075 if (!req) { 1076 mutex_unlock(&nic->mbox.lock); 1077 return -ENOMEM; 1078 } 1079 1080 req->entry = flow_node->entry; 1081 1082 err = otx2_sync_mbox_msg(&nic->mbox); 1083 if (err) { 1084 netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n", 1085 req->entry); 1086 mutex_unlock(&nic->mbox.lock); 1087 return -EFAULT; 1088 } 1089 1090 rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp 1091 (&nic->mbox.mbox, 0, &req->hdr); 1092 if (IS_ERR(rsp)) { 1093 mutex_unlock(&nic->mbox.lock); 1094 return PTR_ERR(rsp); 1095 } 1096 1097 mutex_unlock(&nic->mbox.lock); 1098 1099 if (!rsp->stat_ena) 1100 return -EINVAL; 1101 1102 stats = &flow_node->stats; 1103 1104 spin_lock(&flow_node->lock); 1105 flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0, 1106 FLOW_ACTION_HW_STATS_IMMEDIATE); 1107 stats->pkts = rsp->stat; 1108 spin_unlock(&flow_node->lock); 1109 1110 return 0; 1111 } 1112 1113 static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, 1114 struct flow_cls_offload *cls_flower) 1115 { 1116 switch (cls_flower->command) { 1117 case FLOW_CLS_REPLACE: 1118 return otx2_tc_add_flow(nic, cls_flower); 1119 case FLOW_CLS_DESTROY: 1120 return otx2_tc_del_flow(nic, cls_flower); 1121 case FLOW_CLS_STATS: 1122 return otx2_tc_get_flow_stats(nic, cls_flower); 1123 default: 1124 return -EOPNOTSUPP; 1125 } 1126 } 1127 1128 static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic, 1129 struct tc_cls_matchall_offload *cls) 1130 { 1131 struct netlink_ext_ack *extack = cls->common.extack; 1132 struct flow_action *actions = &cls->rule->action; 1133 struct flow_action_entry *entry; 1134 u64 rate; 1135 int err; 1136 1137 err = otx2_tc_validate_flow(nic, actions, extack); 1138 if (err) 1139 return err; 1140 1141 if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) { 1142 NL_SET_ERR_MSG_MOD(extack, 1143 "Only one ingress MATCHALL ratelimitter can be offloaded"); 1144 return -ENOMEM; 1145 } 1146 1147 entry = &cls->rule->action.entries[0]; 1148 switch (entry->id) { 1149 case FLOW_ACTION_POLICE: 1150 /* Ingress ratelimiting is not supported on OcteonTx2 */ 1151 if (is_dev_otx2(nic->pdev)) { 1152 NL_SET_ERR_MSG_MOD(extack, 1153 "Ingress policing not supported on this platform"); 1154 return -EOPNOTSUPP; 1155 } 1156 1157 err = cn10k_alloc_matchall_ipolicer(nic); 1158 if (err) 1159 return err; 1160 1161 /* Convert to bits per second */ 1162 rate = entry->police.rate_bytes_ps * 8; 1163 err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate); 1164 if (err) 1165 return err; 1166 nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 1167 break; 1168 default: 1169 NL_SET_ERR_MSG_MOD(extack, 1170 "Only police action supported with Ingress MATCHALL offload"); 1171 return -EOPNOTSUPP; 1172 } 1173 1174 return 0; 1175 } 1176 1177 static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic, 1178 struct tc_cls_matchall_offload *cls) 1179 { 1180 struct netlink_ext_ack *extack = cls->common.extack; 1181 int err; 1182 1183 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 1184 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 1185 return -EINVAL; 1186 } 1187 1188 err = cn10k_free_matchall_ipolicer(nic); 1189 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 1190 return err; 1191 } 1192 1193 static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic, 1194 struct tc_cls_matchall_offload *cls_matchall) 1195 { 1196 switch (cls_matchall->command) { 1197 case TC_CLSMATCHALL_REPLACE: 1198 return otx2_tc_ingress_matchall_install(nic, cls_matchall); 1199 case TC_CLSMATCHALL_DESTROY: 1200 return otx2_tc_ingress_matchall_delete(nic, cls_matchall); 1201 case TC_CLSMATCHALL_STATS: 1202 default: 1203 break; 1204 } 1205 1206 return -EOPNOTSUPP; 1207 } 1208 1209 static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type, 1210 void *type_data, void *cb_priv) 1211 { 1212 struct otx2_nic *nic = cb_priv; 1213 bool ntuple; 1214 1215 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 1216 return -EOPNOTSUPP; 1217 1218 ntuple = nic->netdev->features & NETIF_F_NTUPLE; 1219 switch (type) { 1220 case TC_SETUP_CLSFLOWER: 1221 if (ntuple) { 1222 netdev_warn(nic->netdev, 1223 "Can't install TC flower offload rule when NTUPLE is active"); 1224 return -EOPNOTSUPP; 1225 } 1226 1227 return otx2_setup_tc_cls_flower(nic, type_data); 1228 case TC_SETUP_CLSMATCHALL: 1229 return otx2_setup_tc_ingress_matchall(nic, type_data); 1230 default: 1231 break; 1232 } 1233 1234 return -EOPNOTSUPP; 1235 } 1236 1237 static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic, 1238 struct tc_cls_matchall_offload *cls_matchall) 1239 { 1240 switch (cls_matchall->command) { 1241 case TC_CLSMATCHALL_REPLACE: 1242 return otx2_tc_egress_matchall_install(nic, cls_matchall); 1243 case TC_CLSMATCHALL_DESTROY: 1244 return otx2_tc_egress_matchall_delete(nic, cls_matchall); 1245 case TC_CLSMATCHALL_STATS: 1246 default: 1247 break; 1248 } 1249 1250 return -EOPNOTSUPP; 1251 } 1252 1253 static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type, 1254 void *type_data, void *cb_priv) 1255 { 1256 struct otx2_nic *nic = cb_priv; 1257 1258 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 1259 return -EOPNOTSUPP; 1260 1261 switch (type) { 1262 case TC_SETUP_CLSMATCHALL: 1263 return otx2_setup_tc_egress_matchall(nic, type_data); 1264 default: 1265 break; 1266 } 1267 1268 return -EOPNOTSUPP; 1269 } 1270 1271 static LIST_HEAD(otx2_block_cb_list); 1272 1273 static int otx2_setup_tc_block(struct net_device *netdev, 1274 struct flow_block_offload *f) 1275 { 1276 struct otx2_nic *nic = netdev_priv(netdev); 1277 flow_setup_cb_t *cb; 1278 bool ingress; 1279 1280 if (f->block_shared) 1281 return -EOPNOTSUPP; 1282 1283 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { 1284 cb = otx2_setup_tc_block_ingress_cb; 1285 ingress = true; 1286 } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 1287 cb = otx2_setup_tc_block_egress_cb; 1288 ingress = false; 1289 } else { 1290 return -EOPNOTSUPP; 1291 } 1292 1293 return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb, 1294 nic, nic, ingress); 1295 } 1296 1297 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, 1298 void *type_data) 1299 { 1300 switch (type) { 1301 case TC_SETUP_BLOCK: 1302 return otx2_setup_tc_block(netdev, type_data); 1303 case TC_SETUP_QDISC_HTB: 1304 return otx2_setup_tc_htb(netdev, type_data); 1305 default: 1306 return -EOPNOTSUPP; 1307 } 1308 } 1309 EXPORT_SYMBOL(otx2_setup_tc); 1310 1311 int otx2_init_tc(struct otx2_nic *nic) 1312 { 1313 /* Exclude receive queue 0 being used for police action */ 1314 set_bit(0, &nic->rq_bmap); 1315 1316 if (!nic->flow_cfg) { 1317 netdev_err(nic->netdev, 1318 "Can't init TC, nic->flow_cfg is not setup\n"); 1319 return -EINVAL; 1320 } 1321 1322 return 0; 1323 } 1324 EXPORT_SYMBOL(otx2_init_tc); 1325 1326 void otx2_shutdown_tc(struct otx2_nic *nic) 1327 { 1328 otx2_destroy_tc_flow_list(nic); 1329 } 1330 EXPORT_SYMBOL(otx2_shutdown_tc); 1331