// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "cn10k.h"
#include "otx2_common.h"

/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT		0x0FULL
#define MAX_BURST_MANTISSA		0xFFULL
#define MAX_BURST_SIZE			130816ULL
#define MAX_RATE_DIVIDER_EXPONENT	12ULL
#define MAX_RATE_EXPONENT		0x0FULL
#define MAX_RATE_MANTISSA		0xFFULL

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA		GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT		GENMASK_ULL(12, 9)
#define TLX_RATE_DIVIDER_EXPONENT	GENMASK_ULL(16, 13)
#define TLX_BURST_MANTISSA		GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT		GENMASK_ULL(40, 37)

struct otx2_tc_flow_stats {
	u64 bytes;
	u64 pkts;
	u64 used;
};

struct otx2_tc_flow {
	struct rhash_head		node;
	unsigned long			cookie;
	unsigned int			bitpos;
	struct rcu_head			rcu;
	struct otx2_tc_flow_stats	stats;
	spinlock_t			lock; /* lock for stats */
	u16				rq;
	u16				entry;
	u16				leaf_profile;
	bool				is_act_police;
};

int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;

	if (!nic->flow_cfg->max_flows || is_otx2_vf(nic->pcifunc))
		return 0;

	/* Max flows changed, free the existing bitmap */
	kfree(tc->tc_entries_bitmap);

	tc->tc_entries_bitmap =
			kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
				sizeof(long), GFP_KERNEL);
	if (!tc->tc_entries_bitmap) {
		netdev_err(nic->netdev,
			   "Unable to alloc TC flow entries bitmap\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);

static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
				      u32 *burst_mantissa)
{
	unsigned int tmp;

	/* Burst is calculated as
	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
	 * Max supported burst size is 130,816 bytes.
	 */
	burst = min_t(u32, burst, MAX_BURST_SIZE);
	if (burst) {
		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
		tmp = burst - rounddown_pow_of_two(burst);
		/* For bursts up to 255 bytes burst_exp <= 6, so the divisor
		 * in the else branch would be a negative shift; use the
		 * doubling form for those values.
		 */
		if (burst <= MAX_BURST_MANTISSA)
			*burst_mantissa = tmp * 2;
		else
			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
	} else {
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = MAX_BURST_MANTISSA;
	}
}
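
/* Worked example of the burst encoding above (illustrative):
 * burst = 65536 gives ilog2(65536) = 16, so burst_exp = 15 and
 * tmp = 65536 - rounddown_pow_of_two(65536) = 0, hence burst_mantissa = 0.
 * Decoding with the hardware formula:
 * ((256 + 0) << (1 + 15)) / 256 = 65536 bytes, matching the request.
 */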

static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	unsigned int tmp;

	/* Rate calculation by hardware
	 *
	 * PIR_ADD = ((256 + mantissa) << exp) / 256
	 * rate = (2 * PIR_ADD) / (1 << div_exp)
	 * The resultant rate is in Mbps.
	 */

	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
	 * Setting this to '0' will ease the calculation of
	 * exponent and mantissa.
	 */
	*div_exp = 0;

	if (maxrate) {
		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
		tmp = maxrate - rounddown_pow_of_two(maxrate);
		/* As in the burst encoding, rates up to 255 must use the
		 * doubling form to avoid a negative shift below.
		 */
		if (maxrate <= MAX_RATE_MANTISSA)
			*mantissa = tmp * 2;
		else
			*mantissa = tmp / (1ULL << (*exp - 7));
	} else {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
	}
}

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
			 FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}

static int otx2_tc_validate_flow(struct otx2_nic *nic,
				 struct flow_action *actions,
				 struct netlink_ext_ack *extack)
{
	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MATCHALL offload supports only 1 policing action");
		return -EINVAL;
	}
	return 0;
}

static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one Egress MATCHALL ratelimiter can be offloaded");
		return -EBUSY;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		if (entry->police.rate_pkt_ps) {
			NL_SET_ERR_MSG_MOD(extack, "QoS offload doesn't support packets per second");
			return -EOPNOTSUPP;
		}
		/* Convert bytes per second to Mbps; keep the math in u64 so
		 * rates above ~4.2 Gbps don't overflow.
		 */
		rate = entry->police.rate_bytes_ps * 8;
		rate = max_t(u64, rate / 1000000, 1);
		err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action is supported with Egress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}
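
/* Illustrative usage of the egress MATCHALL path above (the interface
 * name is an assumption):
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 egress matchall skip_sw \
 *       action police rate 1gbit burst 64k
 * tc passes the rate as bytes/sec (125000000 here); the install handler
 * converts it back to Mbps: 125000000 * 8 / 1000000 = 1000.
 */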

static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = otx2_set_matchall_egress_rate(nic, 0, 0);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
	return err;
}

static int otx2_tc_act_set_police(struct otx2_nic *nic,
				  struct otx2_tc_flow *node,
				  struct flow_cls_offload *f,
				  u64 rate, u32 burst, u32 mark,
				  struct npc_install_flow_req *req, bool pps)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct otx2_hw *hw = &nic->hw;
	int rq_idx, rc;

	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
	if (rq_idx >= hw->rx_queues) {
		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
	if (rc) {
		mutex_unlock(&nic->mbox.lock);
		return rc;
	}

	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
	if (rc)
		goto free_leaf;

	rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
	if (rc)
		goto free_leaf;

	mutex_unlock(&nic->mbox.lock);

	req->match_id = mark & 0xFFFFULL;
	req->index = rq_idx;
	req->op = NIX_RX_ACTIONOP_UCAST;
	set_bit(rq_idx, &nic->rq_bmap);
	node->is_act_police = true;
	node->rq = rq_idx;

	return 0;

free_leaf:
	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
		netdev_err(nic->netdev,
			   "Unable to free leaf bandwidth profile(%d)\n",
			   node->leaf_profile);
	mutex_unlock(&nic->mbox.lock);
	return rc;
}
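
/* Summary of the police plumbing set up by otx2_tc_act_set_police()
 * above: a CN10K leaf bandwidth profile is allocated and programmed
 * with the requested rate/burst, an unused receive queue is mapped to
 * that profile, and the caller's MCAM rule is pointed at that RQ via
 * NIX_RX_ACTIONOP_UCAST so the policer applies to all matching traffic.
 */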

static int otx2_tc_parse_actions(struct otx2_nic *nic,
				 struct flow_action *flow_action,
				 struct npc_install_flow_req *req,
				 struct flow_cls_offload *f,
				 struct otx2_tc_flow *node)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_action_entry *act;
	struct net_device *target;
	struct otx2_nic *priv;
	u32 burst = 0, mark = 0;
	u8 nr_police = 0;
	bool pps = false;
	u64 rate = 0;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
		return -EINVAL;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			req->op = NIX_RX_ACTIONOP_DROP;
			return 0;
		case FLOW_ACTION_ACCEPT:
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_REDIRECT_INGRESS:
			target = act->dev;
			priv = netdev_priv(target);
			/* npc_install_flow_req doesn't support passing a target pcifunc */
			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't redirect to other pf/vf");
				return -EOPNOTSUPP;
			}
			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_VLAN_POP:
			req->vtag0_valid = true;
			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
			break;
		case FLOW_ACTION_POLICE:
			/* Ingress ratelimiting is not supported on OcteonTx2 */
			if (is_dev_otx2(nic->pdev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Ingress policing not supported on this platform");
				return -EOPNOTSUPP;
			}

			if (act->police.rate_bytes_ps > 0) {
				rate = act->police.rate_bytes_ps * 8;
				burst = act->police.burst;
			} else if (act->police.rate_pkt_ps > 0) {
				/* The algorithm used to calculate rate
				 * mantissa, exponent values for a given token
				 * rate (token can be byte or packet) requires
				 * token rate to be multiplied by 8.
				 */
				rate = act->police.rate_pkt_ps * 8;
				burst = act->police.burst_pkt;
				pps = true;
			}
			nr_police++;
			break;
		case FLOW_ACTION_MARK:
			mark = act->mark;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (nr_police > 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "rate limit police offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (nr_police)
		return otx2_tc_act_set_police(nic, node, f, rate, burst,
					      mark, req, pps);

	return 0;
}
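
/* Illustrative packet-per-second police rule exercising the pps branch
 * above (assumes a CN10K device and an iproute2 recent enough to have
 * pkts_rate/pkts_burst):
 *   tc filter add dev eth0 ingress protocol ip flower skip_sw \
 *       action police pkts_rate 1000 pkts_burst 128
 */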

static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
				struct flow_cls_offload *f,
				struct npc_install_flow_req *req)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_msg *flow_spec = &req->packet;
	struct flow_msg *flow_mask = &req->mask;
	struct flow_dissector *dissector;
	struct flow_rule *rule;
	u8 ip_proto = 0;

	rule = flow_cls_offload_flow_rule(f);
	dissector = rule->match.dissector;

	if ((dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_IP)))) {
		netdev_info(nic->netdev, "unsupported flow used key 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		/* All EtherTypes can be matched, no hw limitation */
		flow_spec->etype = match.key->n_proto;
		flow_mask->etype = match.mask->n_proto;
		req->features |= BIT_ULL(NPC_ETYPE);

		if (match.mask->ip_proto &&
		    (match.key->ip_proto != IPPROTO_TCP &&
		     match.key->ip_proto != IPPROTO_UDP &&
		     match.key->ip_proto != IPPROTO_SCTP &&
		     match.key->ip_proto != IPPROTO_ICMP &&
		     match.key->ip_proto != IPPROTO_ICMPV6)) {
			netdev_info(nic->netdev,
				    "ip_proto=0x%x not supported\n",
				    match.key->ip_proto);
			return -EOPNOTSUPP;
		}
		if (match.mask->ip_proto)
			ip_proto = match.key->ip_proto;

		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		else if (ip_proto == IPPROTO_ICMP)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
		else if (ip_proto == IPPROTO_ICMPV6)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		if (!is_zero_ether_addr(match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
			return -EOPNOTSUPP;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
			ether_addr_copy(flow_mask->dmac,
					(u8 *)&match.mask->dst);
			req->features |= BIT_ULL(NPC_DMAC);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
		    match.mask->tos) {
			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
			return -EOPNOTSUPP;
		}
		if (match.mask->ttl) {
			NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
			return -EOPNOTSUPP;
		}
		flow_spec->tos = match.key->tos;
		flow_mask->tos = match.mask->tos;
		req->features |= BIT_ULL(NPC_TOS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
		u16 vlan_tci, vlan_tci_mask;

		flow_rule_match_vlan(rule, &match);

		if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
			netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
				   ntohs(match.key->vlan_tpid));
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id ||
		    match.mask->vlan_dei ||
		    match.mask->vlan_priority) {
			/* Standard 802.1Q TCI layout: PCP in bits 15:13,
			 * DEI in bit 12, VID in bits 11:0.
			 */
			vlan_tci = match.key->vlan_id |
				   match.key->vlan_dei << 12 |
				   match.key->vlan_priority << 13;

			vlan_tci_mask = match.mask->vlan_id |
					match.mask->vlan_dei << 12 |
					match.mask->vlan_priority << 13;

			flow_spec->vlan_tci = htons(vlan_tci);
			flow_mask->vlan_tci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);

		flow_spec->ip4dst = match.key->dst;
		flow_mask->ip4dst = match.mask->dst;
		req->features |= BIT_ULL(NPC_DIP_IPV4);

		flow_spec->ip4src = match.key->src;
		flow_mask->ip4src = match.mask->src;
		req->features |= BIT_ULL(NPC_SIP_IPV4);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow matching IPv6 loopback addr not supported");
			return -EOPNOTSUPP;
		}

		if (!ipv6_addr_any(&match.mask->dst)) {
			memcpy(&flow_spec->ip6dst,
			       (struct in6_addr *)&match.key->dst,
			       sizeof(flow_spec->ip6dst));
			memcpy(&flow_mask->ip6dst,
			       (struct in6_addr *)&match.mask->dst,
			       sizeof(flow_spec->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		if (!ipv6_addr_any(&match.mask->src)) {
			memcpy(&flow_spec->ip6src,
			       (struct in6_addr *)&match.key->src,
			       sizeof(flow_spec->ip6src));
			memcpy(&flow_mask->ip6src,
			       (struct in6_addr *)&match.mask->src,
			       sizeof(flow_spec->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);

		flow_spec->dport = match.key->dst;
		flow_mask->dport = match.mask->dst;
		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_DPORT_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_DPORT_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_DPORT_SCTP);

		flow_spec->sport = match.key->src;
		flow_mask->sport = match.mask->src;
		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_SPORT_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_SPORT_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_SPORT_SCTP);
	}

	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
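
/* Illustrative flower rule covered by the parser above (interface name
 * is an assumption):
 *   tc filter add dev eth0 ingress protocol ip flower skip_sw \
 *       dst_mac 02:00:00:00:00:01 ip_proto tcp dst_port 80 action drop
 * This sets NPC_ETYPE, NPC_DMAC, NPC_IPPROTO_TCP and NPC_DPORT_TCP in
 * req->features and NIX_RX_ACTIONOP_DROP as the action.
 */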

static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
			   entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}
	mutex_unlock(&nic->mbox.lock);

	return 0;
}

static int otx2_tc_del_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			   tc_flow_cmd->cookie);
		return -EINVAL;
	}

	if (flow_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
						 flow_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   flow_node->rq, flow_node->leaf_profile);

		err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   flow_node->leaf_profile);

		__clear_bit(flow_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}

	otx2_del_mcam_flow_entry(nic, flow_node->entry);

	WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
				       &flow_node->node,
				       nic->tc_info.flow_ht_params));
	/* Release the bitmap slot before the node is handed to RCU */
	clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
	kfree_rcu(flow_node, rcu);
	flow_cfg->nr_flows--;

	return 0;
}
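
/* Notes on otx2_tc_add_flow() below:
 * - FLOW_CLS_REPLACE semantics: if a flow with the same cookie already
 *   exists it is deleted before the new one is installed.
 * - MCAM entries are handed out from the tail of flow_cfg->flow_ent[]
 *   (index max_flows - bitpos - 1), so bitmap position 0 corresponds to
 *   the last reserved entry.
 */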

static int otx2_tc_add_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct otx2_tc_flow *new_node, *old_node;
	struct npc_install_flow_req *req, dummy;
	int rc, err;

	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		return -EOPNOTSUPP;

	if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Free MCAM entry not available to add the flow");
		return -ENOMEM;
	}

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	spin_lock_init(&new_node->lock);
	new_node->cookie = tc_flow_cmd->cookie;

	memset(&dummy, 0, sizeof(struct npc_install_flow_req));

	rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
	if (rc)
		goto free_node;

	/* If a flow exists with the same cookie, delete it */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		otx2_tc_del_flow(nic, tc_flow_cmd);

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		rc = -ENOMEM;
		goto free_node;
	}

	memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
	memcpy(req, &dummy, sizeof(struct npc_install_flow_req));

	new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
					       flow_cfg->max_flows);
	req->channel = nic->hw.rx_chan_base;
	req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	new_node->entry = req->entry;

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
		mutex_unlock(&nic->mbox.lock);
		goto free_node;
	}
	mutex_unlock(&nic->mbox.lock);

	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
				    nic->tc_info.flow_ht_params);
	if (rc) {
		otx2_del_mcam_flow_entry(nic, new_node->entry);
		goto free_node;
	}

	set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
	flow_cfg->nr_flows++;

	return 0;

free_node:
	/* Undo any police setup first, then free the node; freeing the
	 * node before this cleanup would be a use-after-free.
	 */
	if (new_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
						 new_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   new_node->rq, new_node->leaf_profile);
		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   new_node->leaf_profile);

		__clear_bit(new_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}
	kfree_rcu(new_node, rcu);

	return rc;
}

static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
				  struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct npc_mcam_get_stats_req *req;
	struct npc_mcam_get_stats_rsp *rsp;
	struct otx2_tc_flow_stats *stats;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_node->entry;

	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&nic->mbox.lock);
		return PTR_ERR(rsp);
	}

	mutex_unlock(&nic->mbox.lock);

	if (!rsp->stat_ena)
		return -EINVAL;

	stats = &flow_node->stats;

	spin_lock(&flow_node->lock);
	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	stats->pkts = rsp->stat;
	spin_unlock(&flow_node->lock);

	return 0;
}
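
/* Note on otx2_tc_get_flow_stats() above: the AF returns a cumulative
 * hit counter for the MCAM entry (rsp->stat), so the driver reports the
 * delta since the last query to the TC core and caches the new total in
 * flow_node->stats.pkts; byte and drop counters are not maintained and
 * are reported as zero.
 */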

static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
				    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return otx2_tc_add_flow(nic, cls_flower);
	case FLOW_CLS_DESTROY:
		return otx2_tc_del_flow(nic, cls_flower);
	case FLOW_CLS_STATS:
		return otx2_tc_get_flow_stats(nic, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
					    struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one ingress MATCHALL ratelimiter can be offloaded");
		return -EBUSY;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		/* Ingress ratelimiting is not supported on OcteonTx2 */
		if (is_dev_otx2(nic->pdev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Ingress policing not supported on this platform");
			return -EOPNOTSUPP;
		}

		err = cn10k_alloc_matchall_ipolicer(nic);
		if (err)
			return err;

		/* Convert to bits per second */
		rate = entry->police.rate_bytes_ps * 8;
		err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action supported with Ingress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = cn10k_free_matchall_ipolicer(nic);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
	return err;
}
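
/* Illustrative usage of the ingress MATCHALL path above (interface name
 * is an assumption; requires a CN10K device):
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress matchall skip_sw \
 *       action police rate 100mbit burst 32k
 */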

static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_ingress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return otx2_setup_tc_cls_flower(nic, type_data);
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_ingress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
					 struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_egress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_egress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static LIST_HEAD(otx2_block_cb_list);

static int otx2_setup_tc_block(struct net_device *netdev,
			       struct flow_block_offload *f)
{
	struct otx2_nic *nic = netdev_priv(netdev);
	flow_setup_cb_t *cb;
	bool ingress;

	if (f->block_shared)
		return -EOPNOTSUPP;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = otx2_setup_tc_block_ingress_cb;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = otx2_setup_tc_block_egress_cb;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
					  nic, nic, ingress);
}

int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return otx2_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct rhashtable_params tc_flow_ht_params = {
	.head_offset = offsetof(struct otx2_tc_flow, node),
	.key_offset = offsetof(struct otx2_tc_flow, cookie),
	.key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int otx2_init_tc(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;
	int err;

	/* Exclude receive queue 0 from being used for police actions */
	set_bit(0, &nic->rq_bmap);

	if (!nic->flow_cfg) {
		netdev_err(nic->netdev,
			   "Can't init TC, nic->flow_cfg is not setup\n");
		return -EINVAL;
	}

	err = otx2_tc_alloc_ent_bitmap(nic);
	if (err)
		return err;

	tc->flow_ht_params = tc_flow_ht_params;
	return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
}

void otx2_shutdown_tc(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;

	kfree(tc->tc_entries_bitmap);
	rhashtable_destroy(&tc->flow_table);
}