1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Ethernet driver 3 * 4 * Copyright (C) 2021 Marvell. 5 * 6 */ 7 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/inetdevice.h> 11 #include <linux/rhashtable.h> 12 #include <linux/bitfield.h> 13 #include <net/flow_dissector.h> 14 #include <net/pkt_cls.h> 15 #include <net/tc_act/tc_gact.h> 16 #include <net/tc_act/tc_mirred.h> 17 #include <net/tc_act/tc_vlan.h> 18 #include <net/ipv6.h> 19 20 #include "cn10k.h" 21 #include "otx2_common.h" 22 #include "qos.h" 23 24 #define CN10K_MAX_BURST_MANTISSA 0x7FFFULL 25 #define CN10K_MAX_BURST_SIZE 8453888ULL 26 27 #define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29) 28 #define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44) 29 30 struct otx2_tc_flow_stats { 31 u64 bytes; 32 u64 pkts; 33 u64 used; 34 }; 35 36 struct otx2_tc_flow { 37 struct rhash_head node; 38 unsigned long cookie; 39 unsigned int bitpos; 40 struct rcu_head rcu; 41 struct otx2_tc_flow_stats stats; 42 spinlock_t lock; /* lock for stats */ 43 u16 rq; 44 u16 entry; 45 u16 leaf_profile; 46 bool is_act_police; 47 }; 48 49 int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic) 50 { 51 struct otx2_tc_info *tc = &nic->tc_info; 52 53 if (!nic->flow_cfg->max_flows) 54 return 0; 55 56 /* Max flows changed, free the existing bitmap */ 57 kfree(tc->tc_entries_bitmap); 58 59 tc->tc_entries_bitmap = 60 kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows), 61 sizeof(long), GFP_KERNEL); 62 if (!tc->tc_entries_bitmap) { 63 netdev_err(nic->netdev, 64 "Unable to alloc TC flow entries bitmap\n"); 65 return -ENOMEM; 66 } 67 68 return 0; 69 } 70 EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap); 71 72 static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst, 73 u32 *burst_exp, u32 *burst_mantissa) 74 { 75 int max_burst, max_mantissa; 76 unsigned int tmp; 77 78 if (is_dev_otx2(nic->pdev)) { 79 max_burst = MAX_BURST_SIZE; 80 max_mantissa = MAX_BURST_MANTISSA; 81 } else { 82 max_burst = CN10K_MAX_BURST_SIZE; 83 max_mantissa = CN10K_MAX_BURST_MANTISSA; 84 } 85 86 /* Burst is calculated as 87 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256 88 * Max supported burst size is 130,816 bytes. 89 */ 90 burst = min_t(u32, burst, max_burst); 91 if (burst) { 92 *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0; 93 tmp = burst - rounddown_pow_of_two(burst); 94 if (burst < max_mantissa) 95 *burst_mantissa = tmp * 2; 96 else 97 *burst_mantissa = tmp / (1ULL << (*burst_exp - 7)); 98 } else { 99 *burst_exp = MAX_BURST_EXPONENT; 100 *burst_mantissa = max_mantissa; 101 } 102 } 103 104 static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp, 105 u32 *mantissa, u32 *div_exp) 106 { 107 u64 tmp; 108 109 /* Rate calculation by hardware 110 * 111 * PIR_ADD = ((256 + mantissa) << exp) / 256 112 * rate = (2 * PIR_ADD) / ( 1 << div_exp) 113 * The resultant rate is in Mbps. 114 */ 115 116 /* 2Mbps to 100Gbps can be expressed with div_exp = 0. 117 * Setting this to '0' will ease the calculation of 118 * exponent and mantissa. 119 */ 120 *div_exp = 0; 121 122 if (maxrate) { 123 *exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0; 124 tmp = maxrate - rounddown_pow_of_two(maxrate); 125 if (maxrate < MAX_RATE_MANTISSA) 126 *mantissa = tmp * 2; 127 else 128 *mantissa = tmp / (1ULL << (*exp - 7)); 129 } else { 130 /* Instead of disabling rate limiting, set all values to max */ 131 *exp = MAX_RATE_EXPONENT; 132 *mantissa = MAX_RATE_MANTISSA; 133 } 134 } 135 136 u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic, 137 u64 maxrate, u32 burst) 138 { 139 u32 burst_exp, burst_mantissa; 140 u32 exp, mantissa, div_exp; 141 u64 regval = 0; 142 143 /* Get exponent and mantissa values from the desired rate */ 144 otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa); 145 otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp); 146 147 if (is_dev_otx2(nic->pdev)) { 148 regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) | 149 FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) | 150 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | 151 FIELD_PREP(TLX_RATE_EXPONENT, exp) | 152 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); 153 } else { 154 regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) | 155 FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) | 156 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | 157 FIELD_PREP(TLX_RATE_EXPONENT, exp) | 158 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); 159 } 160 161 return regval; 162 } 163 164 static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, 165 u32 burst, u64 maxrate) 166 { 167 struct otx2_hw *hw = &nic->hw; 168 struct nix_txschq_config *req; 169 int txschq, err; 170 171 /* All SQs share the same TL4, so pick the first scheduler */ 172 txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; 173 174 mutex_lock(&nic->mbox.lock); 175 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox); 176 if (!req) { 177 mutex_unlock(&nic->mbox.lock); 178 return -ENOMEM; 179 } 180 181 req->lvl = NIX_TXSCH_LVL_TL4; 182 req->num_regs = 1; 183 req->reg[0] = NIX_AF_TL4X_PIR(txschq); 184 req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst); 185 186 err = otx2_sync_mbox_msg(&nic->mbox); 187 mutex_unlock(&nic->mbox.lock); 188 return err; 189 } 190 191 static int otx2_tc_validate_flow(struct otx2_nic *nic, 192 struct flow_action *actions, 193 struct netlink_ext_ack *extack) 194 { 195 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 196 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 197 return -EINVAL; 198 } 199 200 if (!flow_action_has_entries(actions)) { 201 NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action"); 202 return -EINVAL; 203 } 204 205 if (!flow_offload_has_one_action(actions)) { 206 NL_SET_ERR_MSG_MOD(extack, 207 "Egress MATCHALL offload supports only 1 policing action"); 208 return -EINVAL; 209 } 210 return 0; 211 } 212 213 static int otx2_policer_validate(const struct flow_action *action, 214 const struct flow_action_entry *act, 215 struct netlink_ext_ack *extack) 216 { 217 if (act->police.exceed.act_id != FLOW_ACTION_DROP) { 218 NL_SET_ERR_MSG_MOD(extack, 219 "Offload not supported when exceed action is not drop"); 220 return -EOPNOTSUPP; 221 } 222 223 if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && 224 act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { 225 NL_SET_ERR_MSG_MOD(extack, 226 "Offload not supported when conform action is not pipe or ok"); 227 return -EOPNOTSUPP; 228 } 229 230 if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && 231 !flow_action_is_last_entry(action, act)) { 232 NL_SET_ERR_MSG_MOD(extack, 233 "Offload not supported when conform action is ok, but action is not last"); 234 return -EOPNOTSUPP; 235 } 236 237 if (act->police.peakrate_bytes_ps || 238 act->police.avrate || act->police.overhead) { 239 NL_SET_ERR_MSG_MOD(extack, 240 "Offload not supported when peakrate/avrate/overhead is configured"); 241 return -EOPNOTSUPP; 242 } 243 244 return 0; 245 } 246 247 static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, 248 struct tc_cls_matchall_offload *cls) 249 { 250 struct netlink_ext_ack *extack = cls->common.extack; 251 struct flow_action *actions = &cls->rule->action; 252 struct flow_action_entry *entry; 253 int err; 254 255 err = otx2_tc_validate_flow(nic, actions, extack); 256 if (err) 257 return err; 258 259 if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) { 260 NL_SET_ERR_MSG_MOD(extack, 261 "Only one Egress MATCHALL ratelimiter can be offloaded"); 262 return -ENOMEM; 263 } 264 265 entry = &cls->rule->action.entries[0]; 266 switch (entry->id) { 267 case FLOW_ACTION_POLICE: 268 err = otx2_policer_validate(&cls->rule->action, entry, extack); 269 if (err) 270 return err; 271 272 if (entry->police.rate_pkt_ps) { 273 NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); 274 return -EOPNOTSUPP; 275 } 276 err = otx2_set_matchall_egress_rate(nic, entry->police.burst, 277 otx2_convert_rate(entry->police.rate_bytes_ps)); 278 if (err) 279 return err; 280 nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 281 break; 282 default: 283 NL_SET_ERR_MSG_MOD(extack, 284 "Only police action is supported with Egress MATCHALL offload"); 285 return -EOPNOTSUPP; 286 } 287 288 return 0; 289 } 290 291 static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic, 292 struct tc_cls_matchall_offload *cls) 293 { 294 struct netlink_ext_ack *extack = cls->common.extack; 295 int err; 296 297 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 298 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 299 return -EINVAL; 300 } 301 302 err = otx2_set_matchall_egress_rate(nic, 0, 0); 303 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 304 return err; 305 } 306 307 static int otx2_tc_act_set_police(struct otx2_nic *nic, 308 struct otx2_tc_flow *node, 309 struct flow_cls_offload *f, 310 u64 rate, u32 burst, u32 mark, 311 struct npc_install_flow_req *req, bool pps) 312 { 313 struct netlink_ext_ack *extack = f->common.extack; 314 struct otx2_hw *hw = &nic->hw; 315 int rq_idx, rc; 316 317 rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues); 318 if (rq_idx >= hw->rx_queues) { 319 NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded"); 320 return -EINVAL; 321 } 322 323 mutex_lock(&nic->mbox.lock); 324 325 rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile); 326 if (rc) { 327 mutex_unlock(&nic->mbox.lock); 328 return rc; 329 } 330 331 rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps); 332 if (rc) 333 goto free_leaf; 334 335 rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true); 336 if (rc) 337 goto free_leaf; 338 339 mutex_unlock(&nic->mbox.lock); 340 341 req->match_id = mark & 0xFFFFULL; 342 req->index = rq_idx; 343 req->op = NIX_RX_ACTIONOP_UCAST; 344 set_bit(rq_idx, &nic->rq_bmap); 345 node->is_act_police = true; 346 node->rq = rq_idx; 347 348 return 0; 349 350 free_leaf: 351 if (cn10k_free_leaf_profile(nic, node->leaf_profile)) 352 netdev_err(nic->netdev, 353 "Unable to free leaf bandwidth profile(%d)\n", 354 node->leaf_profile); 355 mutex_unlock(&nic->mbox.lock); 356 return rc; 357 } 358 359 static int otx2_tc_parse_actions(struct otx2_nic *nic, 360 struct flow_action *flow_action, 361 struct npc_install_flow_req *req, 362 struct flow_cls_offload *f, 363 struct otx2_tc_flow *node) 364 { 365 struct netlink_ext_ack *extack = f->common.extack; 366 struct flow_action_entry *act; 367 struct net_device *target; 368 struct otx2_nic *priv; 369 u32 burst, mark = 0; 370 u8 nr_police = 0; 371 bool pps = false; 372 u64 rate; 373 int err; 374 int i; 375 376 if (!flow_action_has_entries(flow_action)) { 377 NL_SET_ERR_MSG_MOD(extack, "no tc actions specified"); 378 return -EINVAL; 379 } 380 381 flow_action_for_each(i, act, flow_action) { 382 switch (act->id) { 383 case FLOW_ACTION_DROP: 384 req->op = NIX_RX_ACTIONOP_DROP; 385 return 0; 386 case FLOW_ACTION_ACCEPT: 387 req->op = NIX_RX_ACTION_DEFAULT; 388 return 0; 389 case FLOW_ACTION_REDIRECT_INGRESS: 390 target = act->dev; 391 priv = netdev_priv(target); 392 /* npc_install_flow_req doesn't support passing a target pcifunc */ 393 if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { 394 NL_SET_ERR_MSG_MOD(extack, 395 "can't redirect to other pf/vf"); 396 return -EOPNOTSUPP; 397 } 398 req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; 399 req->op = NIX_RX_ACTION_DEFAULT; 400 return 0; 401 case FLOW_ACTION_VLAN_POP: 402 req->vtag0_valid = true; 403 /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */ 404 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; 405 break; 406 case FLOW_ACTION_POLICE: 407 /* Ingress ratelimiting is not supported on OcteonTx2 */ 408 if (is_dev_otx2(nic->pdev)) { 409 NL_SET_ERR_MSG_MOD(extack, 410 "Ingress policing not supported on this platform"); 411 return -EOPNOTSUPP; 412 } 413 414 err = otx2_policer_validate(flow_action, act, extack); 415 if (err) 416 return err; 417 418 if (act->police.rate_bytes_ps > 0) { 419 rate = act->police.rate_bytes_ps * 8; 420 burst = act->police.burst; 421 } else if (act->police.rate_pkt_ps > 0) { 422 /* The algorithm used to calculate rate 423 * mantissa, exponent values for a given token 424 * rate (token can be byte or packet) requires 425 * token rate to be mutiplied by 8. 426 */ 427 rate = act->police.rate_pkt_ps * 8; 428 burst = act->police.burst_pkt; 429 pps = true; 430 } 431 nr_police++; 432 break; 433 case FLOW_ACTION_MARK: 434 mark = act->mark; 435 break; 436 default: 437 return -EOPNOTSUPP; 438 } 439 } 440 441 if (nr_police > 1) { 442 NL_SET_ERR_MSG_MOD(extack, 443 "rate limit police offload requires a single action"); 444 return -EOPNOTSUPP; 445 } 446 447 if (nr_police) 448 return otx2_tc_act_set_police(nic, node, f, rate, burst, 449 mark, req, pps); 450 451 return 0; 452 } 453 454 static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, 455 struct flow_cls_offload *f, 456 struct npc_install_flow_req *req) 457 { 458 struct netlink_ext_ack *extack = f->common.extack; 459 struct flow_msg *flow_spec = &req->packet; 460 struct flow_msg *flow_mask = &req->mask; 461 struct flow_dissector *dissector; 462 struct flow_rule *rule; 463 u8 ip_proto = 0; 464 465 rule = flow_cls_offload_flow_rule(f); 466 dissector = rule->match.dissector; 467 468 if ((dissector->used_keys & 469 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 470 BIT(FLOW_DISSECTOR_KEY_BASIC) | 471 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 472 BIT(FLOW_DISSECTOR_KEY_VLAN) | 473 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 474 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 475 BIT(FLOW_DISSECTOR_KEY_PORTS) | 476 BIT(FLOW_DISSECTOR_KEY_IP)))) { 477 netdev_info(nic->netdev, "unsupported flow used key 0x%x", 478 dissector->used_keys); 479 return -EOPNOTSUPP; 480 } 481 482 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 483 struct flow_match_basic match; 484 485 flow_rule_match_basic(rule, &match); 486 487 /* All EtherTypes can be matched, no hw limitation */ 488 flow_spec->etype = match.key->n_proto; 489 flow_mask->etype = match.mask->n_proto; 490 req->features |= BIT_ULL(NPC_ETYPE); 491 492 if (match.mask->ip_proto && 493 (match.key->ip_proto != IPPROTO_TCP && 494 match.key->ip_proto != IPPROTO_UDP && 495 match.key->ip_proto != IPPROTO_SCTP && 496 match.key->ip_proto != IPPROTO_ICMP && 497 match.key->ip_proto != IPPROTO_ICMPV6)) { 498 netdev_info(nic->netdev, 499 "ip_proto=0x%x not supported\n", 500 match.key->ip_proto); 501 return -EOPNOTSUPP; 502 } 503 if (match.mask->ip_proto) 504 ip_proto = match.key->ip_proto; 505 506 if (ip_proto == IPPROTO_UDP) 507 req->features |= BIT_ULL(NPC_IPPROTO_UDP); 508 else if (ip_proto == IPPROTO_TCP) 509 req->features |= BIT_ULL(NPC_IPPROTO_TCP); 510 else if (ip_proto == IPPROTO_SCTP) 511 req->features |= BIT_ULL(NPC_IPPROTO_SCTP); 512 else if (ip_proto == IPPROTO_ICMP) 513 req->features |= BIT_ULL(NPC_IPPROTO_ICMP); 514 else if (ip_proto == IPPROTO_ICMPV6) 515 req->features |= BIT_ULL(NPC_IPPROTO_ICMP6); 516 } 517 518 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 519 struct flow_match_control match; 520 521 flow_rule_match_control(rule, &match); 522 if (match.mask->flags & FLOW_DIS_FIRST_FRAG) { 523 NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later"); 524 return -EOPNOTSUPP; 525 } 526 527 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { 528 if (ntohs(flow_spec->etype) == ETH_P_IP) { 529 flow_spec->ip_flag = IPV4_FLAG_MORE; 530 flow_mask->ip_flag = IPV4_FLAG_MORE; 531 req->features |= BIT_ULL(NPC_IPFRAG_IPV4); 532 } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) { 533 flow_spec->next_header = IPPROTO_FRAGMENT; 534 flow_mask->next_header = 0xff; 535 req->features |= BIT_ULL(NPC_IPFRAG_IPV6); 536 } else { 537 NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 and IPv6"); 538 return -EOPNOTSUPP; 539 } 540 } 541 } 542 543 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 544 struct flow_match_eth_addrs match; 545 546 flow_rule_match_eth_addrs(rule, &match); 547 if (!is_zero_ether_addr(match.mask->src)) { 548 NL_SET_ERR_MSG_MOD(extack, "src mac match not supported"); 549 return -EOPNOTSUPP; 550 } 551 552 if (!is_zero_ether_addr(match.mask->dst)) { 553 ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst); 554 ether_addr_copy(flow_mask->dmac, 555 (u8 *)&match.mask->dst); 556 req->features |= BIT_ULL(NPC_DMAC); 557 } 558 } 559 560 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { 561 struct flow_match_ip match; 562 563 flow_rule_match_ip(rule, &match); 564 if ((ntohs(flow_spec->etype) != ETH_P_IP) && 565 match.mask->tos) { 566 NL_SET_ERR_MSG_MOD(extack, "tos not supported"); 567 return -EOPNOTSUPP; 568 } 569 if (match.mask->ttl) { 570 NL_SET_ERR_MSG_MOD(extack, "ttl not supported"); 571 return -EOPNOTSUPP; 572 } 573 flow_spec->tos = match.key->tos; 574 flow_mask->tos = match.mask->tos; 575 req->features |= BIT_ULL(NPC_TOS); 576 } 577 578 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 579 struct flow_match_vlan match; 580 u16 vlan_tci, vlan_tci_mask; 581 582 flow_rule_match_vlan(rule, &match); 583 584 if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) { 585 netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n", 586 ntohs(match.key->vlan_tpid)); 587 return -EOPNOTSUPP; 588 } 589 590 if (match.mask->vlan_id || 591 match.mask->vlan_dei || 592 match.mask->vlan_priority) { 593 vlan_tci = match.key->vlan_id | 594 match.key->vlan_dei << 12 | 595 match.key->vlan_priority << 13; 596 597 vlan_tci_mask = match.mask->vlan_id | 598 match.mask->vlan_dei << 12 | 599 match.mask->vlan_priority << 13; 600 601 flow_spec->vlan_tci = htons(vlan_tci); 602 flow_mask->vlan_tci = htons(vlan_tci_mask); 603 req->features |= BIT_ULL(NPC_OUTER_VID); 604 } 605 } 606 607 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { 608 struct flow_match_ipv4_addrs match; 609 610 flow_rule_match_ipv4_addrs(rule, &match); 611 612 flow_spec->ip4dst = match.key->dst; 613 flow_mask->ip4dst = match.mask->dst; 614 req->features |= BIT_ULL(NPC_DIP_IPV4); 615 616 flow_spec->ip4src = match.key->src; 617 flow_mask->ip4src = match.mask->src; 618 req->features |= BIT_ULL(NPC_SIP_IPV4); 619 } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { 620 struct flow_match_ipv6_addrs match; 621 622 flow_rule_match_ipv6_addrs(rule, &match); 623 624 if (ipv6_addr_loopback(&match.key->dst) || 625 ipv6_addr_loopback(&match.key->src)) { 626 NL_SET_ERR_MSG_MOD(extack, 627 "Flow matching IPv6 loopback addr not supported"); 628 return -EOPNOTSUPP; 629 } 630 631 if (!ipv6_addr_any(&match.mask->dst)) { 632 memcpy(&flow_spec->ip6dst, 633 (struct in6_addr *)&match.key->dst, 634 sizeof(flow_spec->ip6dst)); 635 memcpy(&flow_mask->ip6dst, 636 (struct in6_addr *)&match.mask->dst, 637 sizeof(flow_spec->ip6dst)); 638 req->features |= BIT_ULL(NPC_DIP_IPV6); 639 } 640 641 if (!ipv6_addr_any(&match.mask->src)) { 642 memcpy(&flow_spec->ip6src, 643 (struct in6_addr *)&match.key->src, 644 sizeof(flow_spec->ip6src)); 645 memcpy(&flow_mask->ip6src, 646 (struct in6_addr *)&match.mask->src, 647 sizeof(flow_spec->ip6src)); 648 req->features |= BIT_ULL(NPC_SIP_IPV6); 649 } 650 } 651 652 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 653 struct flow_match_ports match; 654 655 flow_rule_match_ports(rule, &match); 656 657 flow_spec->dport = match.key->dst; 658 flow_mask->dport = match.mask->dst; 659 660 if (flow_mask->dport) { 661 if (ip_proto == IPPROTO_UDP) 662 req->features |= BIT_ULL(NPC_DPORT_UDP); 663 else if (ip_proto == IPPROTO_TCP) 664 req->features |= BIT_ULL(NPC_DPORT_TCP); 665 else if (ip_proto == IPPROTO_SCTP) 666 req->features |= BIT_ULL(NPC_DPORT_SCTP); 667 } 668 669 flow_spec->sport = match.key->src; 670 flow_mask->sport = match.mask->src; 671 672 if (flow_mask->sport) { 673 if (ip_proto == IPPROTO_UDP) 674 req->features |= BIT_ULL(NPC_SPORT_UDP); 675 else if (ip_proto == IPPROTO_TCP) 676 req->features |= BIT_ULL(NPC_SPORT_TCP); 677 else if (ip_proto == IPPROTO_SCTP) 678 req->features |= BIT_ULL(NPC_SPORT_SCTP); 679 } 680 } 681 682 return otx2_tc_parse_actions(nic, &rule->action, req, f, node); 683 } 684 685 static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry) 686 { 687 struct npc_delete_flow_req *req; 688 int err; 689 690 mutex_lock(&nic->mbox.lock); 691 req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox); 692 if (!req) { 693 mutex_unlock(&nic->mbox.lock); 694 return -ENOMEM; 695 } 696 697 req->entry = entry; 698 699 /* Send message to AF */ 700 err = otx2_sync_mbox_msg(&nic->mbox); 701 if (err) { 702 netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n", 703 entry); 704 mutex_unlock(&nic->mbox.lock); 705 return -EFAULT; 706 } 707 mutex_unlock(&nic->mbox.lock); 708 709 return 0; 710 } 711 712 static int otx2_tc_del_flow(struct otx2_nic *nic, 713 struct flow_cls_offload *tc_flow_cmd) 714 { 715 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 716 struct otx2_tc_info *tc_info = &nic->tc_info; 717 struct otx2_tc_flow *flow_node; 718 int err; 719 720 flow_node = rhashtable_lookup_fast(&tc_info->flow_table, 721 &tc_flow_cmd->cookie, 722 tc_info->flow_ht_params); 723 if (!flow_node) { 724 netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n", 725 tc_flow_cmd->cookie); 726 return -EINVAL; 727 } 728 729 if (flow_node->is_act_police) { 730 mutex_lock(&nic->mbox.lock); 731 732 err = cn10k_map_unmap_rq_policer(nic, flow_node->rq, 733 flow_node->leaf_profile, false); 734 if (err) 735 netdev_err(nic->netdev, 736 "Unmapping RQ %d & profile %d failed\n", 737 flow_node->rq, flow_node->leaf_profile); 738 739 err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile); 740 if (err) 741 netdev_err(nic->netdev, 742 "Unable to free leaf bandwidth profile(%d)\n", 743 flow_node->leaf_profile); 744 745 __clear_bit(flow_node->rq, &nic->rq_bmap); 746 747 mutex_unlock(&nic->mbox.lock); 748 } 749 750 otx2_del_mcam_flow_entry(nic, flow_node->entry); 751 752 WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table, 753 &flow_node->node, 754 nic->tc_info.flow_ht_params)); 755 kfree_rcu(flow_node, rcu); 756 757 clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap); 758 flow_cfg->nr_flows--; 759 760 return 0; 761 } 762 763 static int otx2_tc_add_flow(struct otx2_nic *nic, 764 struct flow_cls_offload *tc_flow_cmd) 765 { 766 struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; 767 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 768 struct otx2_tc_info *tc_info = &nic->tc_info; 769 struct otx2_tc_flow *new_node, *old_node; 770 struct npc_install_flow_req *req, dummy; 771 int rc, err; 772 773 if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) 774 return -ENOMEM; 775 776 if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) { 777 NL_SET_ERR_MSG_MOD(extack, 778 "Free MCAM entry not available to add the flow"); 779 return -ENOMEM; 780 } 781 782 /* allocate memory for the new flow and it's node */ 783 new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); 784 if (!new_node) 785 return -ENOMEM; 786 spin_lock_init(&new_node->lock); 787 new_node->cookie = tc_flow_cmd->cookie; 788 789 memset(&dummy, 0, sizeof(struct npc_install_flow_req)); 790 791 rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy); 792 if (rc) { 793 kfree_rcu(new_node, rcu); 794 return rc; 795 } 796 797 /* If a flow exists with the same cookie, delete it */ 798 old_node = rhashtable_lookup_fast(&tc_info->flow_table, 799 &tc_flow_cmd->cookie, 800 tc_info->flow_ht_params); 801 if (old_node) 802 otx2_tc_del_flow(nic, tc_flow_cmd); 803 804 mutex_lock(&nic->mbox.lock); 805 req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 806 if (!req) { 807 mutex_unlock(&nic->mbox.lock); 808 rc = -ENOMEM; 809 goto free_leaf; 810 } 811 812 memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr)); 813 memcpy(req, &dummy, sizeof(struct npc_install_flow_req)); 814 815 new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap, 816 flow_cfg->max_flows); 817 req->channel = nic->hw.rx_chan_base; 818 req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1]; 819 req->intf = NIX_INTF_RX; 820 req->set_cntr = 1; 821 new_node->entry = req->entry; 822 823 /* Send message to AF */ 824 rc = otx2_sync_mbox_msg(&nic->mbox); 825 if (rc) { 826 NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry"); 827 mutex_unlock(&nic->mbox.lock); 828 kfree_rcu(new_node, rcu); 829 goto free_leaf; 830 } 831 mutex_unlock(&nic->mbox.lock); 832 833 /* add new flow to flow-table */ 834 rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node, 835 nic->tc_info.flow_ht_params); 836 if (rc) { 837 otx2_del_mcam_flow_entry(nic, req->entry); 838 kfree_rcu(new_node, rcu); 839 goto free_leaf; 840 } 841 842 set_bit(new_node->bitpos, tc_info->tc_entries_bitmap); 843 flow_cfg->nr_flows++; 844 845 return 0; 846 847 free_leaf: 848 if (new_node->is_act_police) { 849 mutex_lock(&nic->mbox.lock); 850 851 err = cn10k_map_unmap_rq_policer(nic, new_node->rq, 852 new_node->leaf_profile, false); 853 if (err) 854 netdev_err(nic->netdev, 855 "Unmapping RQ %d & profile %d failed\n", 856 new_node->rq, new_node->leaf_profile); 857 err = cn10k_free_leaf_profile(nic, new_node->leaf_profile); 858 if (err) 859 netdev_err(nic->netdev, 860 "Unable to free leaf bandwidth profile(%d)\n", 861 new_node->leaf_profile); 862 863 __clear_bit(new_node->rq, &nic->rq_bmap); 864 865 mutex_unlock(&nic->mbox.lock); 866 } 867 868 return rc; 869 } 870 871 static int otx2_tc_get_flow_stats(struct otx2_nic *nic, 872 struct flow_cls_offload *tc_flow_cmd) 873 { 874 struct otx2_tc_info *tc_info = &nic->tc_info; 875 struct npc_mcam_get_stats_req *req; 876 struct npc_mcam_get_stats_rsp *rsp; 877 struct otx2_tc_flow_stats *stats; 878 struct otx2_tc_flow *flow_node; 879 int err; 880 881 flow_node = rhashtable_lookup_fast(&tc_info->flow_table, 882 &tc_flow_cmd->cookie, 883 tc_info->flow_ht_params); 884 if (!flow_node) { 885 netdev_info(nic->netdev, "tc flow not found for cookie %lx", 886 tc_flow_cmd->cookie); 887 return -EINVAL; 888 } 889 890 mutex_lock(&nic->mbox.lock); 891 892 req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox); 893 if (!req) { 894 mutex_unlock(&nic->mbox.lock); 895 return -ENOMEM; 896 } 897 898 req->entry = flow_node->entry; 899 900 err = otx2_sync_mbox_msg(&nic->mbox); 901 if (err) { 902 netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n", 903 req->entry); 904 mutex_unlock(&nic->mbox.lock); 905 return -EFAULT; 906 } 907 908 rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp 909 (&nic->mbox.mbox, 0, &req->hdr); 910 if (IS_ERR(rsp)) { 911 mutex_unlock(&nic->mbox.lock); 912 return PTR_ERR(rsp); 913 } 914 915 mutex_unlock(&nic->mbox.lock); 916 917 if (!rsp->stat_ena) 918 return -EINVAL; 919 920 stats = &flow_node->stats; 921 922 spin_lock(&flow_node->lock); 923 flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0, 924 FLOW_ACTION_HW_STATS_IMMEDIATE); 925 stats->pkts = rsp->stat; 926 spin_unlock(&flow_node->lock); 927 928 return 0; 929 } 930 931 static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, 932 struct flow_cls_offload *cls_flower) 933 { 934 switch (cls_flower->command) { 935 case FLOW_CLS_REPLACE: 936 return otx2_tc_add_flow(nic, cls_flower); 937 case FLOW_CLS_DESTROY: 938 return otx2_tc_del_flow(nic, cls_flower); 939 case FLOW_CLS_STATS: 940 return otx2_tc_get_flow_stats(nic, cls_flower); 941 default: 942 return -EOPNOTSUPP; 943 } 944 } 945 946 static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic, 947 struct tc_cls_matchall_offload *cls) 948 { 949 struct netlink_ext_ack *extack = cls->common.extack; 950 struct flow_action *actions = &cls->rule->action; 951 struct flow_action_entry *entry; 952 u64 rate; 953 int err; 954 955 err = otx2_tc_validate_flow(nic, actions, extack); 956 if (err) 957 return err; 958 959 if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) { 960 NL_SET_ERR_MSG_MOD(extack, 961 "Only one ingress MATCHALL ratelimitter can be offloaded"); 962 return -ENOMEM; 963 } 964 965 entry = &cls->rule->action.entries[0]; 966 switch (entry->id) { 967 case FLOW_ACTION_POLICE: 968 /* Ingress ratelimiting is not supported on OcteonTx2 */ 969 if (is_dev_otx2(nic->pdev)) { 970 NL_SET_ERR_MSG_MOD(extack, 971 "Ingress policing not supported on this platform"); 972 return -EOPNOTSUPP; 973 } 974 975 err = cn10k_alloc_matchall_ipolicer(nic); 976 if (err) 977 return err; 978 979 /* Convert to bits per second */ 980 rate = entry->police.rate_bytes_ps * 8; 981 err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate); 982 if (err) 983 return err; 984 nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 985 break; 986 default: 987 NL_SET_ERR_MSG_MOD(extack, 988 "Only police action supported with Ingress MATCHALL offload"); 989 return -EOPNOTSUPP; 990 } 991 992 return 0; 993 } 994 995 static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic, 996 struct tc_cls_matchall_offload *cls) 997 { 998 struct netlink_ext_ack *extack = cls->common.extack; 999 int err; 1000 1001 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 1002 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 1003 return -EINVAL; 1004 } 1005 1006 err = cn10k_free_matchall_ipolicer(nic); 1007 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 1008 return err; 1009 } 1010 1011 static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic, 1012 struct tc_cls_matchall_offload *cls_matchall) 1013 { 1014 switch (cls_matchall->command) { 1015 case TC_CLSMATCHALL_REPLACE: 1016 return otx2_tc_ingress_matchall_install(nic, cls_matchall); 1017 case TC_CLSMATCHALL_DESTROY: 1018 return otx2_tc_ingress_matchall_delete(nic, cls_matchall); 1019 case TC_CLSMATCHALL_STATS: 1020 default: 1021 break; 1022 } 1023 1024 return -EOPNOTSUPP; 1025 } 1026 1027 static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type, 1028 void *type_data, void *cb_priv) 1029 { 1030 struct otx2_nic *nic = cb_priv; 1031 1032 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 1033 return -EOPNOTSUPP; 1034 1035 switch (type) { 1036 case TC_SETUP_CLSFLOWER: 1037 return otx2_setup_tc_cls_flower(nic, type_data); 1038 case TC_SETUP_CLSMATCHALL: 1039 return otx2_setup_tc_ingress_matchall(nic, type_data); 1040 default: 1041 break; 1042 } 1043 1044 return -EOPNOTSUPP; 1045 } 1046 1047 static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic, 1048 struct tc_cls_matchall_offload *cls_matchall) 1049 { 1050 switch (cls_matchall->command) { 1051 case TC_CLSMATCHALL_REPLACE: 1052 return otx2_tc_egress_matchall_install(nic, cls_matchall); 1053 case TC_CLSMATCHALL_DESTROY: 1054 return otx2_tc_egress_matchall_delete(nic, cls_matchall); 1055 case TC_CLSMATCHALL_STATS: 1056 default: 1057 break; 1058 } 1059 1060 return -EOPNOTSUPP; 1061 } 1062 1063 static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type, 1064 void *type_data, void *cb_priv) 1065 { 1066 struct otx2_nic *nic = cb_priv; 1067 1068 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 1069 return -EOPNOTSUPP; 1070 1071 switch (type) { 1072 case TC_SETUP_CLSMATCHALL: 1073 return otx2_setup_tc_egress_matchall(nic, type_data); 1074 default: 1075 break; 1076 } 1077 1078 return -EOPNOTSUPP; 1079 } 1080 1081 static LIST_HEAD(otx2_block_cb_list); 1082 1083 static int otx2_setup_tc_block(struct net_device *netdev, 1084 struct flow_block_offload *f) 1085 { 1086 struct otx2_nic *nic = netdev_priv(netdev); 1087 flow_setup_cb_t *cb; 1088 bool ingress; 1089 1090 if (f->block_shared) 1091 return -EOPNOTSUPP; 1092 1093 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { 1094 cb = otx2_setup_tc_block_ingress_cb; 1095 ingress = true; 1096 } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 1097 cb = otx2_setup_tc_block_egress_cb; 1098 ingress = false; 1099 } else { 1100 return -EOPNOTSUPP; 1101 } 1102 1103 return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb, 1104 nic, nic, ingress); 1105 } 1106 1107 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, 1108 void *type_data) 1109 { 1110 switch (type) { 1111 case TC_SETUP_BLOCK: 1112 return otx2_setup_tc_block(netdev, type_data); 1113 case TC_SETUP_QDISC_HTB: 1114 return otx2_setup_tc_htb(netdev, type_data); 1115 default: 1116 return -EOPNOTSUPP; 1117 } 1118 } 1119 EXPORT_SYMBOL(otx2_setup_tc); 1120 1121 static const struct rhashtable_params tc_flow_ht_params = { 1122 .head_offset = offsetof(struct otx2_tc_flow, node), 1123 .key_offset = offsetof(struct otx2_tc_flow, cookie), 1124 .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie), 1125 .automatic_shrinking = true, 1126 }; 1127 1128 int otx2_init_tc(struct otx2_nic *nic) 1129 { 1130 struct otx2_tc_info *tc = &nic->tc_info; 1131 int err; 1132 1133 /* Exclude receive queue 0 being used for police action */ 1134 set_bit(0, &nic->rq_bmap); 1135 1136 if (!nic->flow_cfg) { 1137 netdev_err(nic->netdev, 1138 "Can't init TC, nic->flow_cfg is not setup\n"); 1139 return -EINVAL; 1140 } 1141 1142 err = otx2_tc_alloc_ent_bitmap(nic); 1143 if (err) 1144 return err; 1145 1146 tc->flow_ht_params = tc_flow_ht_params; 1147 err = rhashtable_init(&tc->flow_table, &tc->flow_ht_params); 1148 if (err) { 1149 kfree(tc->tc_entries_bitmap); 1150 tc->tc_entries_bitmap = NULL; 1151 } 1152 return err; 1153 } 1154 EXPORT_SYMBOL(otx2_init_tc); 1155 1156 void otx2_shutdown_tc(struct otx2_nic *nic) 1157 { 1158 struct otx2_tc_info *tc = &nic->tc_info; 1159 1160 kfree(tc->tc_entries_bitmap); 1161 rhashtable_destroy(&tc->flow_table); 1162 } 1163 EXPORT_SYMBOL(otx2_shutdown_tc); 1164