1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Ethernet driver 3 * 4 * Copyright (C) 2021 Marvell. 5 * 6 */ 7 8 #include <linux/netdevice.h> 9 #include <linux/etherdevice.h> 10 #include <linux/inetdevice.h> 11 #include <linux/rhashtable.h> 12 #include <linux/bitfield.h> 13 #include <net/flow_dissector.h> 14 #include <net/pkt_cls.h> 15 #include <net/tc_act/tc_gact.h> 16 #include <net/tc_act/tc_mirred.h> 17 #include <net/tc_act/tc_vlan.h> 18 #include <net/ipv6.h> 19 20 #include "cn10k.h" 21 #include "otx2_common.h" 22 #include "qos.h" 23 24 #define CN10K_MAX_BURST_MANTISSA 0x7FFFULL 25 #define CN10K_MAX_BURST_SIZE 8453888ULL 26 27 #define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29) 28 #define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44) 29 30 struct otx2_tc_flow_stats { 31 u64 bytes; 32 u64 pkts; 33 u64 used; 34 }; 35 36 struct otx2_tc_flow { 37 struct rhash_head node; 38 unsigned long cookie; 39 unsigned int bitpos; 40 struct rcu_head rcu; 41 struct otx2_tc_flow_stats stats; 42 spinlock_t lock; /* lock for stats */ 43 u16 rq; 44 u16 entry; 45 u16 leaf_profile; 46 bool is_act_police; 47 }; 48 49 int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic) 50 { 51 struct otx2_tc_info *tc = &nic->tc_info; 52 53 if (!nic->flow_cfg->max_flows) 54 return 0; 55 56 /* Max flows changed, free the existing bitmap */ 57 kfree(tc->tc_entries_bitmap); 58 59 tc->tc_entries_bitmap = 60 kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows), 61 sizeof(long), GFP_KERNEL); 62 if (!tc->tc_entries_bitmap) { 63 netdev_err(nic->netdev, 64 "Unable to alloc TC flow entries bitmap\n"); 65 return -ENOMEM; 66 } 67 68 return 0; 69 } 70 EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap); 71 72 static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst, 73 u32 *burst_exp, u32 *burst_mantissa) 74 { 75 int max_burst, max_mantissa; 76 unsigned int tmp; 77 78 if (is_dev_otx2(nic->pdev)) { 79 max_burst = MAX_BURST_SIZE; 80 max_mantissa = MAX_BURST_MANTISSA; 81 } else { 82 max_burst = CN10K_MAX_BURST_SIZE; 83 max_mantissa = CN10K_MAX_BURST_MANTISSA; 84 } 85 86 /* Burst is calculated as 87 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256 88 * Max supported burst size is 130,816 bytes. 89 */ 90 burst = min_t(u32, burst, max_burst); 91 if (burst) { 92 *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0; 93 tmp = burst - rounddown_pow_of_two(burst); 94 if (burst < max_mantissa) 95 *burst_mantissa = tmp * 2; 96 else 97 *burst_mantissa = tmp / (1ULL << (*burst_exp - 7)); 98 } else { 99 *burst_exp = MAX_BURST_EXPONENT; 100 *burst_mantissa = max_mantissa; 101 } 102 } 103 104 static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp, 105 u32 *mantissa, u32 *div_exp) 106 { 107 u64 tmp; 108 109 /* Rate calculation by hardware 110 * 111 * PIR_ADD = ((256 + mantissa) << exp) / 256 112 * rate = (2 * PIR_ADD) / ( 1 << div_exp) 113 * The resultant rate is in Mbps. 114 */ 115 116 /* 2Mbps to 100Gbps can be expressed with div_exp = 0. 117 * Setting this to '0' will ease the calculation of 118 * exponent and mantissa. 119 */ 120 *div_exp = 0; 121 122 if (maxrate) { 123 *exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0; 124 tmp = maxrate - rounddown_pow_of_two(maxrate); 125 if (maxrate < MAX_RATE_MANTISSA) 126 *mantissa = tmp * 2; 127 else 128 *mantissa = tmp / (1ULL << (*exp - 7)); 129 } else { 130 /* Instead of disabling rate limiting, set all values to max */ 131 *exp = MAX_RATE_EXPONENT; 132 *mantissa = MAX_RATE_MANTISSA; 133 } 134 } 135 136 u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic, 137 u64 maxrate, u32 burst) 138 { 139 u32 burst_exp, burst_mantissa; 140 u32 exp, mantissa, div_exp; 141 u64 regval = 0; 142 143 /* Get exponent and mantissa values from the desired rate */ 144 otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa); 145 otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp); 146 147 if (is_dev_otx2(nic->pdev)) { 148 regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) | 149 FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) | 150 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | 151 FIELD_PREP(TLX_RATE_EXPONENT, exp) | 152 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); 153 } else { 154 regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) | 155 FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) | 156 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) | 157 FIELD_PREP(TLX_RATE_EXPONENT, exp) | 158 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0); 159 } 160 161 return regval; 162 } 163 164 static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, 165 u32 burst, u64 maxrate) 166 { 167 struct otx2_hw *hw = &nic->hw; 168 struct nix_txschq_config *req; 169 int txschq, err; 170 171 /* All SQs share the same TL4, so pick the first scheduler */ 172 txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; 173 174 mutex_lock(&nic->mbox.lock); 175 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox); 176 if (!req) { 177 mutex_unlock(&nic->mbox.lock); 178 return -ENOMEM; 179 } 180 181 req->lvl = NIX_TXSCH_LVL_TL4; 182 req->num_regs = 1; 183 req->reg[0] = NIX_AF_TL4X_PIR(txschq); 184 req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst); 185 186 err = otx2_sync_mbox_msg(&nic->mbox); 187 mutex_unlock(&nic->mbox.lock); 188 return err; 189 } 190 191 static int otx2_tc_validate_flow(struct otx2_nic *nic, 192 struct flow_action *actions, 193 struct netlink_ext_ack *extack) 194 { 195 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 196 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 197 return -EINVAL; 198 } 199 200 if (!flow_action_has_entries(actions)) { 201 NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action"); 202 return -EINVAL; 203 } 204 205 if (!flow_offload_has_one_action(actions)) { 206 NL_SET_ERR_MSG_MOD(extack, 207 "Egress MATCHALL offload supports only 1 policing action"); 208 return -EINVAL; 209 } 210 return 0; 211 } 212 213 static int otx2_policer_validate(const struct flow_action *action, 214 const struct flow_action_entry *act, 215 struct netlink_ext_ack *extack) 216 { 217 if (act->police.exceed.act_id != FLOW_ACTION_DROP) { 218 NL_SET_ERR_MSG_MOD(extack, 219 "Offload not supported when exceed action is not drop"); 220 return -EOPNOTSUPP; 221 } 222 223 if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && 224 act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { 225 NL_SET_ERR_MSG_MOD(extack, 226 "Offload not supported when conform action is not pipe or ok"); 227 return -EOPNOTSUPP; 228 } 229 230 if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && 231 !flow_action_is_last_entry(action, act)) { 232 NL_SET_ERR_MSG_MOD(extack, 233 "Offload not supported when conform action is ok, but action is not last"); 234 return -EOPNOTSUPP; 235 } 236 237 if (act->police.peakrate_bytes_ps || 238 act->police.avrate || act->police.overhead) { 239 NL_SET_ERR_MSG_MOD(extack, 240 "Offload not supported when peakrate/avrate/overhead is configured"); 241 return -EOPNOTSUPP; 242 } 243 244 return 0; 245 } 246 247 static int otx2_tc_egress_matchall_install(struct otx2_nic *nic, 248 struct tc_cls_matchall_offload *cls) 249 { 250 struct netlink_ext_ack *extack = cls->common.extack; 251 struct flow_action *actions = &cls->rule->action; 252 struct flow_action_entry *entry; 253 int err; 254 255 err = otx2_tc_validate_flow(nic, actions, extack); 256 if (err) 257 return err; 258 259 if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) { 260 NL_SET_ERR_MSG_MOD(extack, 261 "Only one Egress MATCHALL ratelimiter can be offloaded"); 262 return -ENOMEM; 263 } 264 265 entry = &cls->rule->action.entries[0]; 266 switch (entry->id) { 267 case FLOW_ACTION_POLICE: 268 err = otx2_policer_validate(&cls->rule->action, entry, extack); 269 if (err) 270 return err; 271 272 if (entry->police.rate_pkt_ps) { 273 NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second"); 274 return -EOPNOTSUPP; 275 } 276 err = otx2_set_matchall_egress_rate(nic, entry->police.burst, 277 otx2_convert_rate(entry->police.rate_bytes_ps)); 278 if (err) 279 return err; 280 nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 281 break; 282 default: 283 NL_SET_ERR_MSG_MOD(extack, 284 "Only police action is supported with Egress MATCHALL offload"); 285 return -EOPNOTSUPP; 286 } 287 288 return 0; 289 } 290 291 static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic, 292 struct tc_cls_matchall_offload *cls) 293 { 294 struct netlink_ext_ack *extack = cls->common.extack; 295 int err; 296 297 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 298 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 299 return -EINVAL; 300 } 301 302 err = otx2_set_matchall_egress_rate(nic, 0, 0); 303 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED; 304 return err; 305 } 306 307 static int otx2_tc_act_set_police(struct otx2_nic *nic, 308 struct otx2_tc_flow *node, 309 struct flow_cls_offload *f, 310 u64 rate, u32 burst, u32 mark, 311 struct npc_install_flow_req *req, bool pps) 312 { 313 struct netlink_ext_ack *extack = f->common.extack; 314 struct otx2_hw *hw = &nic->hw; 315 int rq_idx, rc; 316 317 rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues); 318 if (rq_idx >= hw->rx_queues) { 319 NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded"); 320 return -EINVAL; 321 } 322 323 mutex_lock(&nic->mbox.lock); 324 325 rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile); 326 if (rc) { 327 mutex_unlock(&nic->mbox.lock); 328 return rc; 329 } 330 331 rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps); 332 if (rc) 333 goto free_leaf; 334 335 rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true); 336 if (rc) 337 goto free_leaf; 338 339 mutex_unlock(&nic->mbox.lock); 340 341 req->match_id = mark & 0xFFFFULL; 342 req->index = rq_idx; 343 req->op = NIX_RX_ACTIONOP_UCAST; 344 set_bit(rq_idx, &nic->rq_bmap); 345 node->is_act_police = true; 346 node->rq = rq_idx; 347 348 return 0; 349 350 free_leaf: 351 if (cn10k_free_leaf_profile(nic, node->leaf_profile)) 352 netdev_err(nic->netdev, 353 "Unable to free leaf bandwidth profile(%d)\n", 354 node->leaf_profile); 355 mutex_unlock(&nic->mbox.lock); 356 return rc; 357 } 358 359 static int otx2_tc_parse_actions(struct otx2_nic *nic, 360 struct flow_action *flow_action, 361 struct npc_install_flow_req *req, 362 struct flow_cls_offload *f, 363 struct otx2_tc_flow *node) 364 { 365 struct netlink_ext_ack *extack = f->common.extack; 366 struct flow_action_entry *act; 367 struct net_device *target; 368 struct otx2_nic *priv; 369 u32 burst, mark = 0; 370 u8 nr_police = 0; 371 bool pps = false; 372 u64 rate; 373 int err; 374 int i; 375 376 if (!flow_action_has_entries(flow_action)) { 377 NL_SET_ERR_MSG_MOD(extack, "no tc actions specified"); 378 return -EINVAL; 379 } 380 381 flow_action_for_each(i, act, flow_action) { 382 switch (act->id) { 383 case FLOW_ACTION_DROP: 384 req->op = NIX_RX_ACTIONOP_DROP; 385 return 0; 386 case FLOW_ACTION_ACCEPT: 387 req->op = NIX_RX_ACTION_DEFAULT; 388 return 0; 389 case FLOW_ACTION_REDIRECT_INGRESS: 390 target = act->dev; 391 priv = netdev_priv(target); 392 /* npc_install_flow_req doesn't support passing a target pcifunc */ 393 if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { 394 NL_SET_ERR_MSG_MOD(extack, 395 "can't redirect to other pf/vf"); 396 return -EOPNOTSUPP; 397 } 398 req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; 399 400 /* if op is already set; avoid overwriting the same */ 401 if (!req->op) 402 req->op = NIX_RX_ACTION_DEFAULT; 403 break; 404 405 case FLOW_ACTION_VLAN_POP: 406 req->vtag0_valid = true; 407 /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */ 408 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; 409 break; 410 case FLOW_ACTION_POLICE: 411 /* Ingress ratelimiting is not supported on OcteonTx2 */ 412 if (is_dev_otx2(nic->pdev)) { 413 NL_SET_ERR_MSG_MOD(extack, 414 "Ingress policing not supported on this platform"); 415 return -EOPNOTSUPP; 416 } 417 418 err = otx2_policer_validate(flow_action, act, extack); 419 if (err) 420 return err; 421 422 if (act->police.rate_bytes_ps > 0) { 423 rate = act->police.rate_bytes_ps * 8; 424 burst = act->police.burst; 425 } else if (act->police.rate_pkt_ps > 0) { 426 /* The algorithm used to calculate rate 427 * mantissa, exponent values for a given token 428 * rate (token can be byte or packet) requires 429 * token rate to be mutiplied by 8. 430 */ 431 rate = act->police.rate_pkt_ps * 8; 432 burst = act->police.burst_pkt; 433 pps = true; 434 } 435 nr_police++; 436 break; 437 case FLOW_ACTION_MARK: 438 mark = act->mark; 439 break; 440 441 case FLOW_ACTION_RX_QUEUE_MAPPING: 442 req->op = NIX_RX_ACTIONOP_UCAST; 443 req->index = act->rx_queue; 444 break; 445 446 default: 447 return -EOPNOTSUPP; 448 } 449 } 450 451 if (nr_police > 1) { 452 NL_SET_ERR_MSG_MOD(extack, 453 "rate limit police offload requires a single action"); 454 return -EOPNOTSUPP; 455 } 456 457 if (nr_police) 458 return otx2_tc_act_set_police(nic, node, f, rate, burst, 459 mark, req, pps); 460 461 return 0; 462 } 463 464 static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, 465 struct flow_cls_offload *f, 466 struct npc_install_flow_req *req) 467 { 468 struct netlink_ext_ack *extack = f->common.extack; 469 struct flow_msg *flow_spec = &req->packet; 470 struct flow_msg *flow_mask = &req->mask; 471 struct flow_dissector *dissector; 472 struct flow_rule *rule; 473 u8 ip_proto = 0; 474 475 rule = flow_cls_offload_flow_rule(f); 476 dissector = rule->match.dissector; 477 478 if ((dissector->used_keys & 479 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 480 BIT(FLOW_DISSECTOR_KEY_BASIC) | 481 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 482 BIT(FLOW_DISSECTOR_KEY_VLAN) | 483 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 484 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 485 BIT(FLOW_DISSECTOR_KEY_PORTS) | 486 BIT(FLOW_DISSECTOR_KEY_IP)))) { 487 netdev_info(nic->netdev, "unsupported flow used key 0x%x", 488 dissector->used_keys); 489 return -EOPNOTSUPP; 490 } 491 492 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { 493 struct flow_match_basic match; 494 495 flow_rule_match_basic(rule, &match); 496 497 /* All EtherTypes can be matched, no hw limitation */ 498 flow_spec->etype = match.key->n_proto; 499 flow_mask->etype = match.mask->n_proto; 500 req->features |= BIT_ULL(NPC_ETYPE); 501 502 if (match.mask->ip_proto && 503 (match.key->ip_proto != IPPROTO_TCP && 504 match.key->ip_proto != IPPROTO_UDP && 505 match.key->ip_proto != IPPROTO_SCTP && 506 match.key->ip_proto != IPPROTO_ICMP && 507 match.key->ip_proto != IPPROTO_ICMPV6)) { 508 netdev_info(nic->netdev, 509 "ip_proto=0x%x not supported\n", 510 match.key->ip_proto); 511 return -EOPNOTSUPP; 512 } 513 if (match.mask->ip_proto) 514 ip_proto = match.key->ip_proto; 515 516 if (ip_proto == IPPROTO_UDP) 517 req->features |= BIT_ULL(NPC_IPPROTO_UDP); 518 else if (ip_proto == IPPROTO_TCP) 519 req->features |= BIT_ULL(NPC_IPPROTO_TCP); 520 else if (ip_proto == IPPROTO_SCTP) 521 req->features |= BIT_ULL(NPC_IPPROTO_SCTP); 522 else if (ip_proto == IPPROTO_ICMP) 523 req->features |= BIT_ULL(NPC_IPPROTO_ICMP); 524 else if (ip_proto == IPPROTO_ICMPV6) 525 req->features |= BIT_ULL(NPC_IPPROTO_ICMP6); 526 } 527 528 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 529 struct flow_match_control match; 530 531 flow_rule_match_control(rule, &match); 532 if (match.mask->flags & FLOW_DIS_FIRST_FRAG) { 533 NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later"); 534 return -EOPNOTSUPP; 535 } 536 537 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { 538 if (ntohs(flow_spec->etype) == ETH_P_IP) { 539 flow_spec->ip_flag = IPV4_FLAG_MORE; 540 flow_mask->ip_flag = IPV4_FLAG_MORE; 541 req->features |= BIT_ULL(NPC_IPFRAG_IPV4); 542 } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) { 543 flow_spec->next_header = IPPROTO_FRAGMENT; 544 flow_mask->next_header = 0xff; 545 req->features |= BIT_ULL(NPC_IPFRAG_IPV6); 546 } else { 547 NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 and IPv6"); 548 return -EOPNOTSUPP; 549 } 550 } 551 } 552 553 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 554 struct flow_match_eth_addrs match; 555 556 flow_rule_match_eth_addrs(rule, &match); 557 if (!is_zero_ether_addr(match.mask->src)) { 558 NL_SET_ERR_MSG_MOD(extack, "src mac match not supported"); 559 return -EOPNOTSUPP; 560 } 561 562 if (!is_zero_ether_addr(match.mask->dst)) { 563 ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst); 564 ether_addr_copy(flow_mask->dmac, 565 (u8 *)&match.mask->dst); 566 req->features |= BIT_ULL(NPC_DMAC); 567 } 568 } 569 570 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { 571 struct flow_match_ip match; 572 573 flow_rule_match_ip(rule, &match); 574 if ((ntohs(flow_spec->etype) != ETH_P_IP) && 575 match.mask->tos) { 576 NL_SET_ERR_MSG_MOD(extack, "tos not supported"); 577 return -EOPNOTSUPP; 578 } 579 if (match.mask->ttl) { 580 NL_SET_ERR_MSG_MOD(extack, "ttl not supported"); 581 return -EOPNOTSUPP; 582 } 583 flow_spec->tos = match.key->tos; 584 flow_mask->tos = match.mask->tos; 585 req->features |= BIT_ULL(NPC_TOS); 586 } 587 588 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 589 struct flow_match_vlan match; 590 u16 vlan_tci, vlan_tci_mask; 591 592 flow_rule_match_vlan(rule, &match); 593 594 if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) { 595 netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n", 596 ntohs(match.key->vlan_tpid)); 597 return -EOPNOTSUPP; 598 } 599 600 if (match.mask->vlan_id || 601 match.mask->vlan_dei || 602 match.mask->vlan_priority) { 603 vlan_tci = match.key->vlan_id | 604 match.key->vlan_dei << 12 | 605 match.key->vlan_priority << 13; 606 607 vlan_tci_mask = match.mask->vlan_id | 608 match.mask->vlan_dei << 12 | 609 match.mask->vlan_priority << 13; 610 611 flow_spec->vlan_tci = htons(vlan_tci); 612 flow_mask->vlan_tci = htons(vlan_tci_mask); 613 req->features |= BIT_ULL(NPC_OUTER_VID); 614 } 615 } 616 617 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { 618 struct flow_match_ipv4_addrs match; 619 620 flow_rule_match_ipv4_addrs(rule, &match); 621 622 flow_spec->ip4dst = match.key->dst; 623 flow_mask->ip4dst = match.mask->dst; 624 req->features |= BIT_ULL(NPC_DIP_IPV4); 625 626 flow_spec->ip4src = match.key->src; 627 flow_mask->ip4src = match.mask->src; 628 req->features |= BIT_ULL(NPC_SIP_IPV4); 629 } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { 630 struct flow_match_ipv6_addrs match; 631 632 flow_rule_match_ipv6_addrs(rule, &match); 633 634 if (ipv6_addr_loopback(&match.key->dst) || 635 ipv6_addr_loopback(&match.key->src)) { 636 NL_SET_ERR_MSG_MOD(extack, 637 "Flow matching IPv6 loopback addr not supported"); 638 return -EOPNOTSUPP; 639 } 640 641 if (!ipv6_addr_any(&match.mask->dst)) { 642 memcpy(&flow_spec->ip6dst, 643 (struct in6_addr *)&match.key->dst, 644 sizeof(flow_spec->ip6dst)); 645 memcpy(&flow_mask->ip6dst, 646 (struct in6_addr *)&match.mask->dst, 647 sizeof(flow_spec->ip6dst)); 648 req->features |= BIT_ULL(NPC_DIP_IPV6); 649 } 650 651 if (!ipv6_addr_any(&match.mask->src)) { 652 memcpy(&flow_spec->ip6src, 653 (struct in6_addr *)&match.key->src, 654 sizeof(flow_spec->ip6src)); 655 memcpy(&flow_mask->ip6src, 656 (struct in6_addr *)&match.mask->src, 657 sizeof(flow_spec->ip6src)); 658 req->features |= BIT_ULL(NPC_SIP_IPV6); 659 } 660 } 661 662 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { 663 struct flow_match_ports match; 664 665 flow_rule_match_ports(rule, &match); 666 667 flow_spec->dport = match.key->dst; 668 flow_mask->dport = match.mask->dst; 669 670 if (flow_mask->dport) { 671 if (ip_proto == IPPROTO_UDP) 672 req->features |= BIT_ULL(NPC_DPORT_UDP); 673 else if (ip_proto == IPPROTO_TCP) 674 req->features |= BIT_ULL(NPC_DPORT_TCP); 675 else if (ip_proto == IPPROTO_SCTP) 676 req->features |= BIT_ULL(NPC_DPORT_SCTP); 677 } 678 679 flow_spec->sport = match.key->src; 680 flow_mask->sport = match.mask->src; 681 682 if (flow_mask->sport) { 683 if (ip_proto == IPPROTO_UDP) 684 req->features |= BIT_ULL(NPC_SPORT_UDP); 685 else if (ip_proto == IPPROTO_TCP) 686 req->features |= BIT_ULL(NPC_SPORT_TCP); 687 else if (ip_proto == IPPROTO_SCTP) 688 req->features |= BIT_ULL(NPC_SPORT_SCTP); 689 } 690 } 691 692 return otx2_tc_parse_actions(nic, &rule->action, req, f, node); 693 } 694 695 static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry) 696 { 697 struct npc_delete_flow_req *req; 698 int err; 699 700 mutex_lock(&nic->mbox.lock); 701 req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox); 702 if (!req) { 703 mutex_unlock(&nic->mbox.lock); 704 return -ENOMEM; 705 } 706 707 req->entry = entry; 708 709 /* Send message to AF */ 710 err = otx2_sync_mbox_msg(&nic->mbox); 711 if (err) { 712 netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n", 713 entry); 714 mutex_unlock(&nic->mbox.lock); 715 return -EFAULT; 716 } 717 mutex_unlock(&nic->mbox.lock); 718 719 return 0; 720 } 721 722 static int otx2_tc_del_flow(struct otx2_nic *nic, 723 struct flow_cls_offload *tc_flow_cmd) 724 { 725 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 726 struct otx2_tc_info *tc_info = &nic->tc_info; 727 struct otx2_tc_flow *flow_node; 728 int err; 729 730 flow_node = rhashtable_lookup_fast(&tc_info->flow_table, 731 &tc_flow_cmd->cookie, 732 tc_info->flow_ht_params); 733 if (!flow_node) { 734 netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n", 735 tc_flow_cmd->cookie); 736 return -EINVAL; 737 } 738 739 if (flow_node->is_act_police) { 740 mutex_lock(&nic->mbox.lock); 741 742 err = cn10k_map_unmap_rq_policer(nic, flow_node->rq, 743 flow_node->leaf_profile, false); 744 if (err) 745 netdev_err(nic->netdev, 746 "Unmapping RQ %d & profile %d failed\n", 747 flow_node->rq, flow_node->leaf_profile); 748 749 err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile); 750 if (err) 751 netdev_err(nic->netdev, 752 "Unable to free leaf bandwidth profile(%d)\n", 753 flow_node->leaf_profile); 754 755 __clear_bit(flow_node->rq, &nic->rq_bmap); 756 757 mutex_unlock(&nic->mbox.lock); 758 } 759 760 otx2_del_mcam_flow_entry(nic, flow_node->entry); 761 762 WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table, 763 &flow_node->node, 764 nic->tc_info.flow_ht_params)); 765 kfree_rcu(flow_node, rcu); 766 767 clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap); 768 flow_cfg->nr_flows--; 769 770 return 0; 771 } 772 773 static int otx2_tc_add_flow(struct otx2_nic *nic, 774 struct flow_cls_offload *tc_flow_cmd) 775 { 776 struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; 777 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 778 struct otx2_tc_info *tc_info = &nic->tc_info; 779 struct otx2_tc_flow *new_node, *old_node; 780 struct npc_install_flow_req *req, dummy; 781 int rc, err; 782 783 if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) 784 return -ENOMEM; 785 786 if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) { 787 NL_SET_ERR_MSG_MOD(extack, 788 "Free MCAM entry not available to add the flow"); 789 return -ENOMEM; 790 } 791 792 /* allocate memory for the new flow and it's node */ 793 new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); 794 if (!new_node) 795 return -ENOMEM; 796 spin_lock_init(&new_node->lock); 797 new_node->cookie = tc_flow_cmd->cookie; 798 799 memset(&dummy, 0, sizeof(struct npc_install_flow_req)); 800 801 rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy); 802 if (rc) { 803 kfree_rcu(new_node, rcu); 804 return rc; 805 } 806 807 /* If a flow exists with the same cookie, delete it */ 808 old_node = rhashtable_lookup_fast(&tc_info->flow_table, 809 &tc_flow_cmd->cookie, 810 tc_info->flow_ht_params); 811 if (old_node) 812 otx2_tc_del_flow(nic, tc_flow_cmd); 813 814 mutex_lock(&nic->mbox.lock); 815 req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 816 if (!req) { 817 mutex_unlock(&nic->mbox.lock); 818 rc = -ENOMEM; 819 goto free_leaf; 820 } 821 822 memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr)); 823 memcpy(req, &dummy, sizeof(struct npc_install_flow_req)); 824 825 new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap, 826 flow_cfg->max_flows); 827 req->channel = nic->hw.rx_chan_base; 828 req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1]; 829 req->intf = NIX_INTF_RX; 830 req->set_cntr = 1; 831 new_node->entry = req->entry; 832 833 /* Send message to AF */ 834 rc = otx2_sync_mbox_msg(&nic->mbox); 835 if (rc) { 836 NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry"); 837 mutex_unlock(&nic->mbox.lock); 838 kfree_rcu(new_node, rcu); 839 goto free_leaf; 840 } 841 mutex_unlock(&nic->mbox.lock); 842 843 /* add new flow to flow-table */ 844 rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node, 845 nic->tc_info.flow_ht_params); 846 if (rc) { 847 otx2_del_mcam_flow_entry(nic, req->entry); 848 kfree_rcu(new_node, rcu); 849 goto free_leaf; 850 } 851 852 set_bit(new_node->bitpos, tc_info->tc_entries_bitmap); 853 flow_cfg->nr_flows++; 854 855 return 0; 856 857 free_leaf: 858 if (new_node->is_act_police) { 859 mutex_lock(&nic->mbox.lock); 860 861 err = cn10k_map_unmap_rq_policer(nic, new_node->rq, 862 new_node->leaf_profile, false); 863 if (err) 864 netdev_err(nic->netdev, 865 "Unmapping RQ %d & profile %d failed\n", 866 new_node->rq, new_node->leaf_profile); 867 err = cn10k_free_leaf_profile(nic, new_node->leaf_profile); 868 if (err) 869 netdev_err(nic->netdev, 870 "Unable to free leaf bandwidth profile(%d)\n", 871 new_node->leaf_profile); 872 873 __clear_bit(new_node->rq, &nic->rq_bmap); 874 875 mutex_unlock(&nic->mbox.lock); 876 } 877 878 return rc; 879 } 880 881 static int otx2_tc_get_flow_stats(struct otx2_nic *nic, 882 struct flow_cls_offload *tc_flow_cmd) 883 { 884 struct otx2_tc_info *tc_info = &nic->tc_info; 885 struct npc_mcam_get_stats_req *req; 886 struct npc_mcam_get_stats_rsp *rsp; 887 struct otx2_tc_flow_stats *stats; 888 struct otx2_tc_flow *flow_node; 889 int err; 890 891 flow_node = rhashtable_lookup_fast(&tc_info->flow_table, 892 &tc_flow_cmd->cookie, 893 tc_info->flow_ht_params); 894 if (!flow_node) { 895 netdev_info(nic->netdev, "tc flow not found for cookie %lx", 896 tc_flow_cmd->cookie); 897 return -EINVAL; 898 } 899 900 mutex_lock(&nic->mbox.lock); 901 902 req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox); 903 if (!req) { 904 mutex_unlock(&nic->mbox.lock); 905 return -ENOMEM; 906 } 907 908 req->entry = flow_node->entry; 909 910 err = otx2_sync_mbox_msg(&nic->mbox); 911 if (err) { 912 netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n", 913 req->entry); 914 mutex_unlock(&nic->mbox.lock); 915 return -EFAULT; 916 } 917 918 rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp 919 (&nic->mbox.mbox, 0, &req->hdr); 920 if (IS_ERR(rsp)) { 921 mutex_unlock(&nic->mbox.lock); 922 return PTR_ERR(rsp); 923 } 924 925 mutex_unlock(&nic->mbox.lock); 926 927 if (!rsp->stat_ena) 928 return -EINVAL; 929 930 stats = &flow_node->stats; 931 932 spin_lock(&flow_node->lock); 933 flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0, 934 FLOW_ACTION_HW_STATS_IMMEDIATE); 935 stats->pkts = rsp->stat; 936 spin_unlock(&flow_node->lock); 937 938 return 0; 939 } 940 941 static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, 942 struct flow_cls_offload *cls_flower) 943 { 944 switch (cls_flower->command) { 945 case FLOW_CLS_REPLACE: 946 return otx2_tc_add_flow(nic, cls_flower); 947 case FLOW_CLS_DESTROY: 948 return otx2_tc_del_flow(nic, cls_flower); 949 case FLOW_CLS_STATS: 950 return otx2_tc_get_flow_stats(nic, cls_flower); 951 default: 952 return -EOPNOTSUPP; 953 } 954 } 955 956 static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic, 957 struct tc_cls_matchall_offload *cls) 958 { 959 struct netlink_ext_ack *extack = cls->common.extack; 960 struct flow_action *actions = &cls->rule->action; 961 struct flow_action_entry *entry; 962 u64 rate; 963 int err; 964 965 err = otx2_tc_validate_flow(nic, actions, extack); 966 if (err) 967 return err; 968 969 if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) { 970 NL_SET_ERR_MSG_MOD(extack, 971 "Only one ingress MATCHALL ratelimitter can be offloaded"); 972 return -ENOMEM; 973 } 974 975 entry = &cls->rule->action.entries[0]; 976 switch (entry->id) { 977 case FLOW_ACTION_POLICE: 978 /* Ingress ratelimiting is not supported on OcteonTx2 */ 979 if (is_dev_otx2(nic->pdev)) { 980 NL_SET_ERR_MSG_MOD(extack, 981 "Ingress policing not supported on this platform"); 982 return -EOPNOTSUPP; 983 } 984 985 err = cn10k_alloc_matchall_ipolicer(nic); 986 if (err) 987 return err; 988 989 /* Convert to bits per second */ 990 rate = entry->police.rate_bytes_ps * 8; 991 err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate); 992 if (err) 993 return err; 994 nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 995 break; 996 default: 997 NL_SET_ERR_MSG_MOD(extack, 998 "Only police action supported with Ingress MATCHALL offload"); 999 return -EOPNOTSUPP; 1000 } 1001 1002 return 0; 1003 } 1004 1005 static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic, 1006 struct tc_cls_matchall_offload *cls) 1007 { 1008 struct netlink_ext_ack *extack = cls->common.extack; 1009 int err; 1010 1011 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 1012 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 1013 return -EINVAL; 1014 } 1015 1016 err = cn10k_free_matchall_ipolicer(nic); 1017 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 1018 return err; 1019 } 1020 1021 static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic, 1022 struct tc_cls_matchall_offload *cls_matchall) 1023 { 1024 switch (cls_matchall->command) { 1025 case TC_CLSMATCHALL_REPLACE: 1026 return otx2_tc_ingress_matchall_install(nic, cls_matchall); 1027 case TC_CLSMATCHALL_DESTROY: 1028 return otx2_tc_ingress_matchall_delete(nic, cls_matchall); 1029 case TC_CLSMATCHALL_STATS: 1030 default: 1031 break; 1032 } 1033 1034 return -EOPNOTSUPP; 1035 } 1036 1037 static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type, 1038 void *type_data, void *cb_priv) 1039 { 1040 struct otx2_nic *nic = cb_priv; 1041 1042 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 1043 return -EOPNOTSUPP; 1044 1045 switch (type) { 1046 case TC_SETUP_CLSFLOWER: 1047 return otx2_setup_tc_cls_flower(nic, type_data); 1048 case TC_SETUP_CLSMATCHALL: 1049 return otx2_setup_tc_ingress_matchall(nic, type_data); 1050 default: 1051 break; 1052 } 1053 1054 return -EOPNOTSUPP; 1055 } 1056 1057 static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic, 1058 struct tc_cls_matchall_offload *cls_matchall) 1059 { 1060 switch (cls_matchall->command) { 1061 case TC_CLSMATCHALL_REPLACE: 1062 return otx2_tc_egress_matchall_install(nic, cls_matchall); 1063 case TC_CLSMATCHALL_DESTROY: 1064 return otx2_tc_egress_matchall_delete(nic, cls_matchall); 1065 case TC_CLSMATCHALL_STATS: 1066 default: 1067 break; 1068 } 1069 1070 return -EOPNOTSUPP; 1071 } 1072 1073 static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type, 1074 void *type_data, void *cb_priv) 1075 { 1076 struct otx2_nic *nic = cb_priv; 1077 1078 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 1079 return -EOPNOTSUPP; 1080 1081 switch (type) { 1082 case TC_SETUP_CLSMATCHALL: 1083 return otx2_setup_tc_egress_matchall(nic, type_data); 1084 default: 1085 break; 1086 } 1087 1088 return -EOPNOTSUPP; 1089 } 1090 1091 static LIST_HEAD(otx2_block_cb_list); 1092 1093 static int otx2_setup_tc_block(struct net_device *netdev, 1094 struct flow_block_offload *f) 1095 { 1096 struct otx2_nic *nic = netdev_priv(netdev); 1097 flow_setup_cb_t *cb; 1098 bool ingress; 1099 1100 if (f->block_shared) 1101 return -EOPNOTSUPP; 1102 1103 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { 1104 cb = otx2_setup_tc_block_ingress_cb; 1105 ingress = true; 1106 } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 1107 cb = otx2_setup_tc_block_egress_cb; 1108 ingress = false; 1109 } else { 1110 return -EOPNOTSUPP; 1111 } 1112 1113 return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb, 1114 nic, nic, ingress); 1115 } 1116 1117 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, 1118 void *type_data) 1119 { 1120 switch (type) { 1121 case TC_SETUP_BLOCK: 1122 return otx2_setup_tc_block(netdev, type_data); 1123 case TC_SETUP_QDISC_HTB: 1124 return otx2_setup_tc_htb(netdev, type_data); 1125 default: 1126 return -EOPNOTSUPP; 1127 } 1128 } 1129 EXPORT_SYMBOL(otx2_setup_tc); 1130 1131 static const struct rhashtable_params tc_flow_ht_params = { 1132 .head_offset = offsetof(struct otx2_tc_flow, node), 1133 .key_offset = offsetof(struct otx2_tc_flow, cookie), 1134 .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie), 1135 .automatic_shrinking = true, 1136 }; 1137 1138 int otx2_init_tc(struct otx2_nic *nic) 1139 { 1140 struct otx2_tc_info *tc = &nic->tc_info; 1141 int err; 1142 1143 /* Exclude receive queue 0 being used for police action */ 1144 set_bit(0, &nic->rq_bmap); 1145 1146 if (!nic->flow_cfg) { 1147 netdev_err(nic->netdev, 1148 "Can't init TC, nic->flow_cfg is not setup\n"); 1149 return -EINVAL; 1150 } 1151 1152 err = otx2_tc_alloc_ent_bitmap(nic); 1153 if (err) 1154 return err; 1155 1156 tc->flow_ht_params = tc_flow_ht_params; 1157 err = rhashtable_init(&tc->flow_table, &tc->flow_ht_params); 1158 if (err) { 1159 kfree(tc->tc_entries_bitmap); 1160 tc->tc_entries_bitmap = NULL; 1161 } 1162 return err; 1163 } 1164 EXPORT_SYMBOL(otx2_init_tc); 1165 1166 void otx2_shutdown_tc(struct otx2_nic *nic) 1167 { 1168 struct otx2_tc_info *tc = &nic->tc_info; 1169 1170 kfree(tc->tc_entries_bitmap); 1171 rhashtable_destroy(&tc->flow_table); 1172 } 1173 EXPORT_SYMBOL(otx2_shutdown_tc); 1174