// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "cn10k.h"
#include "otx2_common.h"

/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT		0x0FULL
#define MAX_BURST_MANTISSA		0xFFULL
#define MAX_BURST_SIZE			130816ULL
#define MAX_RATE_DIVIDER_EXPONENT	12ULL
#define MAX_RATE_EXPONENT		0x0FULL
#define MAX_RATE_MANTISSA		0xFFULL

#define CN10K_MAX_BURST_MANTISSA	0x7FFFULL
#define CN10K_MAX_BURST_SIZE		8453888ULL

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA		GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT		GENMASK_ULL(12, 9)
#define TLX_RATE_DIVIDER_EXPONENT	GENMASK_ULL(16, 13)
#define TLX_BURST_MANTISSA		GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT		GENMASK_ULL(40, 37)

#define CN10K_TLX_BURST_MANTISSA	GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT	GENMASK_ULL(47, 44)

struct otx2_tc_flow_stats {
	u64 bytes;
	u64 pkts;
	u64 used;
};

struct otx2_tc_flow {
	struct rhash_head		node;
	unsigned long			cookie;
	unsigned int			bitpos;
	struct rcu_head			rcu;
	struct otx2_tc_flow_stats	stats;
	spinlock_t			lock; /* lock for stats */
	u16				rq;
	u16				entry;
	u16				leaf_profile;
	bool				is_act_police;
};

int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;

	if (!nic->flow_cfg->max_flows)
		return 0;

	/* Max flows changed, free the existing bitmap */
	kfree(tc->tc_entries_bitmap);

	tc->tc_entries_bitmap =
			kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
				sizeof(long), GFP_KERNEL);
	if (!tc->tc_entries_bitmap) {
		netdev_err(nic->netdev,
			   "Unable to alloc TC flow entries bitmap\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);

static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
				      u32 *burst_exp, u32 *burst_mantissa)
{
	int max_burst, max_mantissa;
	unsigned int tmp;

	if (is_dev_otx2(nic->pdev)) {
		max_burst = MAX_BURST_SIZE;
		max_mantissa = MAX_BURST_MANTISSA;
	} else {
		max_burst = CN10K_MAX_BURST_SIZE;
		max_mantissa = CN10K_MAX_BURST_MANTISSA;
	}

	/* Burst is calculated as
	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
	 * Max supported burst size is 130,816 bytes.
	 */
	burst = min_t(u32, burst, max_burst);
	if (burst) {
		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
		tmp = burst - rounddown_pow_of_two(burst);
		if (burst < max_mantissa)
			*burst_mantissa = tmp * 2;
		else
			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
	} else {
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = max_mantissa;
	}
}
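
/* Worked example for the encoding above (illustrative): a requested
 * burst of 4096 bytes gives ilog2(4096) = 12, so burst_exp = 11; 4096
 * is an exact power of two, so the remainder and hence burst_mantissa
 * are 0. Hardware then reconstructs
 * ((256 + 0) << (1 + 11)) / 256 = 4096 bytes.
 */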

static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	u64 tmp;

	/* Rate calculation by hardware
	 *
	 * PIR_ADD = ((256 + mantissa) << exp) / 256
	 * rate = (2 * PIR_ADD) / (1 << div_exp)
	 * The resultant rate is in Mbps.
	 */

	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
	 * Setting this to '0' will ease the calculation of
	 * exponent and mantissa.
	 */
	*div_exp = 0;

	if (maxrate) {
		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
		tmp = maxrate - rounddown_pow_of_two(maxrate);
		if (maxrate < MAX_RATE_MANTISSA)
			*mantissa = tmp * 2;
		else
			*mantissa = tmp / (1ULL << (*exp - 7));
	} else {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
	}
}
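
/* Worked example for the encoding above (illustrative): maxrate = 300
 * Mbps gives ilog2(300) = 8, so exp = 7; the remainder is 300 - 256 = 44
 * and, since 300 exceeds MAX_RATE_MANTISSA, mantissa = 44 >> (7 - 7) = 44.
 * Hardware then computes PIR_ADD = ((256 + 44) << 7) / 256 = 150 and
 * rate = (2 * 150) / (1 << 0) = 300 Mbps.
 */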

static u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
				       u64 maxrate, u32 burst)
{
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	u64 regval = 0;

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

	if (is_dev_otx2(nic->pdev)) {
		regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	} else {
		regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
			 FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	}

	return regval;
}

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
					 u32 burst, u64 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}

static int otx2_tc_validate_flow(struct otx2_nic *nic,
				 struct flow_action *actions,
				 struct netlink_ext_ack *extack)
{
	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload supports only 1 policing action");
		return -EINVAL;
	}
	return 0;
}

static int otx2_policer_validate(const struct flow_action *action,
				 const struct flow_action_entry *act,
				 struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}
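
/* Illustrative usage (a sketch; "eth0" is a placeholder interface name):
 * the egress MATCHALL handlers below offload a single police action that
 * passes otx2_policer_validate(), e.g.:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 egress matchall skip_sw \
 *           action police rate 100mbit burst 16kb conform-exceed drop/pipe
 */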

static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one Egress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		err = otx2_policer_validate(&cls->rule->action, entry, extack);
		if (err)
			return err;

		if (entry->police.rate_pkt_ps) {
			NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second");
			return -EOPNOTSUPP;
		}
		/* Convert bytes per second to Mbps */
		rate = entry->police.rate_bytes_ps * 8;
		rate = max_t(u64, rate / 1000000, 1);
		err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action is supported with Egress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = otx2_set_matchall_egress_rate(nic, 0, 0);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
	return err;
}

static int otx2_tc_act_set_police(struct otx2_nic *nic,
				  struct otx2_tc_flow *node,
				  struct flow_cls_offload *f,
				  u64 rate, u32 burst, u32 mark,
				  struct npc_install_flow_req *req, bool pps)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct otx2_hw *hw = &nic->hw;
	int rq_idx, rc;

	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
	if (rq_idx >= hw->rx_queues) {
		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
	if (rc) {
		mutex_unlock(&nic->mbox.lock);
		return rc;
	}

	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
	if (rc)
		goto free_leaf;

	rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
	if (rc)
		goto free_leaf;

	mutex_unlock(&nic->mbox.lock);

	req->match_id = mark & 0xFFFFULL;
	req->index = rq_idx;
	req->op = NIX_RX_ACTIONOP_UCAST;
	set_bit(rq_idx, &nic->rq_bmap);
	node->is_act_police = true;
	node->rq = rq_idx;

	return 0;

free_leaf:
	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
		netdev_err(nic->netdev,
			   "Unable to free leaf bandwidth profile(%d)\n",
			   node->leaf_profile);
	mutex_unlock(&nic->mbox.lock);
	return rc;
}
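
/* Illustrative usage (a sketch): per-flow ingress policing, set up by
 * otx2_tc_act_set_police() above on CN10K, can be requested with e.g.:
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           dst_ip 192.168.1.2 skip_sw \
 *           action police rate 50mbit burst 16kb conform-exceed drop/pipe
 *
 * The matched flow is steered to a spare receive queue and the rate is
 * enforced by the leaf bandwidth profile mapped to that queue.
 */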

static int otx2_tc_parse_actions(struct otx2_nic *nic,
				 struct flow_action *flow_action,
				 struct npc_install_flow_req *req,
				 struct flow_cls_offload *f,
				 struct otx2_tc_flow *node)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_action_entry *act;
	struct net_device *target;
	struct otx2_nic *priv;
	u32 burst, mark = 0;
	u8 nr_police = 0;
	bool pps = false;
	u64 rate;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
		return -EINVAL;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			req->op = NIX_RX_ACTIONOP_DROP;
			return 0;
		case FLOW_ACTION_ACCEPT:
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_REDIRECT_INGRESS:
			target = act->dev;
			priv = netdev_priv(target);
			/* npc_install_flow_req doesn't support passing a target pcifunc */
			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't redirect to other pf/vf");
				return -EOPNOTSUPP;
			}
			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_VLAN_POP:
			req->vtag0_valid = true;
			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
			break;
		case FLOW_ACTION_POLICE:
			/* Ingress ratelimiting is not supported on OcteonTx2 */
			if (is_dev_otx2(nic->pdev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Ingress policing not supported on this platform");
				return -EOPNOTSUPP;
			}

			err = otx2_policer_validate(flow_action, act, extack);
			if (err)
				return err;

			if (act->police.rate_bytes_ps > 0) {
				rate = act->police.rate_bytes_ps * 8;
				burst = act->police.burst;
			} else if (act->police.rate_pkt_ps > 0) {
				/* The algorithm used to calculate rate
				 * mantissa, exponent values for a given token
				 * rate (token can be byte or packet) requires
				 * token rate to be multiplied by 8.
				 */
				rate = act->police.rate_pkt_ps * 8;
				burst = act->police.burst_pkt;
				pps = true;
			}
			nr_police++;
			break;
		case FLOW_ACTION_MARK:
			mark = act->mark;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (nr_police > 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "rate limit police offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (nr_police)
		return otx2_tc_act_set_police(nic, node, f, rate, burst,
					      mark, req, pps);

	return 0;
}
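
/* Illustrative usage (a sketch): otx2_tc_prepare_flow() below translates
 * flower matches on EtherType, DMAC, VLAN, IPv4/IPv6 addresses, IP
 * proto/TOS and L4 ports into an NPC MCAM entry, e.g.:
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_ip 10.0.0.2 dst_port 80 skip_sw \
 *           action drop
 */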

static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
				struct flow_cls_offload *f,
				struct npc_install_flow_req *req)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_msg *flow_spec = &req->packet;
	struct flow_msg *flow_mask = &req->mask;
	struct flow_dissector *dissector;
	struct flow_rule *rule;
	u8 ip_proto = 0;

	rule = flow_cls_offload_flow_rule(f);
	dissector = rule->match.dissector;

	if ((dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_IP)))) {
		netdev_info(nic->netdev, "unsupported flow used key 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		/* All EtherTypes can be matched, no hw limitation */
		flow_spec->etype = match.key->n_proto;
		flow_mask->etype = match.mask->n_proto;
		req->features |= BIT_ULL(NPC_ETYPE);

		if (match.mask->ip_proto &&
		    (match.key->ip_proto != IPPROTO_TCP &&
		     match.key->ip_proto != IPPROTO_UDP &&
		     match.key->ip_proto != IPPROTO_SCTP &&
		     match.key->ip_proto != IPPROTO_ICMP &&
		     match.key->ip_proto != IPPROTO_ICMPV6)) {
			netdev_info(nic->netdev,
				    "ip_proto=0x%x not supported\n",
				    match.key->ip_proto);
			return -EOPNOTSUPP;
		}
		if (match.mask->ip_proto)
			ip_proto = match.key->ip_proto;

		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		else if (ip_proto == IPPROTO_ICMP)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
		else if (ip_proto == IPPROTO_ICMPV6)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		if (!is_zero_ether_addr(match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
			return -EOPNOTSUPP;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
			ether_addr_copy(flow_mask->dmac,
					(u8 *)&match.mask->dst);
			req->features |= BIT_ULL(NPC_DMAC);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
		    match.mask->tos) {
			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
			return -EOPNOTSUPP;
		}
		if (match.mask->ttl) {
			NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
			return -EOPNOTSUPP;
		}
		flow_spec->tos = match.key->tos;
		flow_mask->tos = match.mask->tos;
		req->features |= BIT_ULL(NPC_TOS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
		u16 vlan_tci, vlan_tci_mask;

		flow_rule_match_vlan(rule, &match);

		if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
			netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
				   ntohs(match.key->vlan_tpid));
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id ||
		    match.mask->vlan_dei ||
		    match.mask->vlan_priority) {
			/* 802.1Q TCI layout: PCP [15:13], DEI [12], VID [11:0] */
			vlan_tci = match.key->vlan_id |
				   match.key->vlan_dei << 12 |
				   match.key->vlan_priority << 13;

			vlan_tci_mask = match.mask->vlan_id |
					match.mask->vlan_dei << 12 |
					match.mask->vlan_priority << 13;

			flow_spec->vlan_tci = htons(vlan_tci);
			flow_mask->vlan_tci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);

		flow_spec->ip4dst = match.key->dst;
		flow_mask->ip4dst = match.mask->dst;
		req->features |= BIT_ULL(NPC_DIP_IPV4);

		flow_spec->ip4src = match.key->src;
		flow_mask->ip4src = match.mask->src;
		req->features |= BIT_ULL(NPC_SIP_IPV4);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow matching IPv6 loopback addr not supported");
			return -EOPNOTSUPP;
		}

		if (!ipv6_addr_any(&match.mask->dst)) {
			memcpy(&flow_spec->ip6dst,
			       (struct in6_addr *)&match.key->dst,
			       sizeof(flow_spec->ip6dst));
			memcpy(&flow_mask->ip6dst,
			       (struct in6_addr *)&match.mask->dst,
			       sizeof(flow_spec->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		if (!ipv6_addr_any(&match.mask->src)) {
			memcpy(&flow_spec->ip6src,
			       (struct in6_addr *)&match.key->src,
			       sizeof(flow_spec->ip6src));
			memcpy(&flow_mask->ip6src,
			       (struct in6_addr *)&match.mask->src,
			       sizeof(flow_spec->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);

		flow_spec->dport = match.key->dst;
		flow_mask->dport = match.mask->dst;

		if (flow_mask->dport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}

		flow_spec->sport = match.key->src;
		flow_mask->sport = match.mask->src;

		if (flow_mask->sport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
	}

	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}

static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
			   entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}
	mutex_unlock(&nic->mbox.lock);

	return 0;
}

static int otx2_tc_del_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			   tc_flow_cmd->cookie);
		return -EINVAL;
	}

	if (flow_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
						 flow_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   flow_node->rq, flow_node->leaf_profile);

		err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   flow_node->leaf_profile);

		__clear_bit(flow_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}

	otx2_del_mcam_flow_entry(nic, flow_node->entry);

	WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
				       &flow_node->node,
				       nic->tc_info.flow_ht_params));
	/* Release the entry bit before the node is queued for freeing */
	clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
	kfree_rcu(flow_node, rcu);

	flow_cfg->nr_flows--;

	return 0;
}

static int otx2_tc_add_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct otx2_tc_flow *new_node, *old_node;
	struct npc_install_flow_req *req, dummy;
	int rc, err;

	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		return -EOPNOTSUPP;

	if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Free MCAM entry not available to add the flow");
		return -ENOMEM;
	}

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	spin_lock_init(&new_node->lock);
	new_node->cookie = tc_flow_cmd->cookie;

	memset(&dummy, 0, sizeof(struct npc_install_flow_req));

	rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
	if (rc) {
		kfree_rcu(new_node, rcu);
		return rc;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		otx2_tc_del_flow(nic, tc_flow_cmd);

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		rc = -ENOMEM;
		goto free_leaf;
	}

	memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
	memcpy(req, &dummy, sizeof(struct npc_install_flow_req));

	new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
					       flow_cfg->max_flows);
	req->channel = nic->hw.rx_chan_base;
	/* MCAM entries are consumed from the end of the allocated range */
	req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	new_node->entry = req->entry;

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
		mutex_unlock(&nic->mbox.lock);
		goto free_leaf;
	}
	mutex_unlock(&nic->mbox.lock);

	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
				    nic->tc_info.flow_ht_params);
	if (rc) {
		otx2_del_mcam_flow_entry(nic, req->entry);
		goto free_leaf;
	}

	set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
	flow_cfg->nr_flows++;

	return 0;

free_leaf:
	if (new_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
						 new_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   new_node->rq, new_node->leaf_profile);
		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   new_node->leaf_profile);

		__clear_bit(new_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}
	/* Free the node only after the policer cleanup above is done with it */
	kfree_rcu(new_node, rcu);

	return rc;
}

static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
				  struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct npc_mcam_get_stats_req *req;
	struct npc_mcam_get_stats_rsp *rsp;
	struct otx2_tc_flow_stats *stats;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_node->entry;

	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&nic->mbox.lock);
		return PTR_ERR(rsp);
	}

	mutex_unlock(&nic->mbox.lock);

	if (!rsp->stat_ena)
		return -EINVAL;

	stats = &flow_node->stats;

	spin_lock(&flow_node->lock);
	/* Hardware keeps a cumulative hit counter; report the delta since
	 * the last query and remember the new cumulative value.
	 */
	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	stats->pkts = rsp->stat;
	spin_unlock(&flow_node->lock);

	return 0;
}
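
/* Illustrative usage (a sketch): the per-entry hardware hit counter read
 * above surfaces through the regular tc statistics path, e.g.:
 *
 *   tc -s filter show dev eth0 ingress
 */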

static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
				    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return otx2_tc_add_flow(nic, cls_flower);
	case FLOW_CLS_DESTROY:
		return otx2_tc_del_flow(nic, cls_flower);
	case FLOW_CLS_STATS:
		return otx2_tc_get_flow_stats(nic, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
					    struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one ingress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		/* Ingress ratelimiting is not supported on OcteonTx2 */
		if (is_dev_otx2(nic->pdev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Ingress policing not supported on this platform");
			return -EOPNOTSUPP;
		}

		err = cn10k_alloc_matchall_ipolicer(nic);
		if (err)
			return err;

		/* Convert to bits per second */
		rate = entry->police.rate_bytes_ps * 8;
		err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action supported with Ingress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = cn10k_free_matchall_ipolicer(nic);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
	return err;
}
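
/* Illustrative usage (a sketch): port-wide ingress rate limiting via the
 * MATCHALL handlers above (CN10K only), e.g.:
 *
 *   tc filter add dev eth0 ingress matchall skip_sw \
 *           action police rate 1gbit burst 64kb conform-exceed drop/pipe
 */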

static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_ingress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return otx2_setup_tc_cls_flower(nic, type_data);
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_ingress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
					 struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_egress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_egress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static LIST_HEAD(otx2_block_cb_list);

static int otx2_setup_tc_block(struct net_device *netdev,
			       struct flow_block_offload *f)
{
	struct otx2_nic *nic = netdev_priv(netdev);
	flow_setup_cb_t *cb;
	bool ingress;

	if (f->block_shared)
		return -EOPNOTSUPP;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = otx2_setup_tc_block_ingress_cb;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = otx2_setup_tc_block_egress_cb;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
					  nic, nic, ingress);
}

int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return otx2_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(otx2_setup_tc);

static const struct rhashtable_params tc_flow_ht_params = {
	.head_offset = offsetof(struct otx2_tc_flow, node),
	.key_offset = offsetof(struct otx2_tc_flow, cookie),
	.key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int otx2_init_tc(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;
	int err;

	/* Exclude receive queue 0 from being used for police actions */
	set_bit(0, &nic->rq_bmap);

	if (!nic->flow_cfg) {
		netdev_err(nic->netdev,
			   "Can't init TC, nic->flow_cfg is not setup\n");
		return -EINVAL;
	}

	err = otx2_tc_alloc_ent_bitmap(nic);
	if (err)
		return err;

	tc->flow_ht_params = tc_flow_ht_params;
	return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
}
EXPORT_SYMBOL(otx2_init_tc);

void otx2_shutdown_tc(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;

	kfree(tc->tc_entries_bitmap);
	rhashtable_destroy(&tc->flow_table);
}
EXPORT_SYMBOL(otx2_shutdown_tc);