// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_port.h"

#define NFP_FL_QOS_UPDATE		msecs_to_jiffies(1000)
#define NFP_FL_QOS_PPS			BIT(15)
#define NFP_FL_QOS_METER		BIT(10)

struct nfp_police_cfg_head {
	__be32 flags_opts;
	union {
		__be32 meter_id;
		__be32 port;
	};
};

enum NFP_FL_QOS_TYPES {
	NFP_FL_QOS_TYPE_BPS,
	NFP_FL_QOS_TYPE_PPS,
	NFP_FL_QOS_TYPE_MAX,
};

/* Police cmsg for configuring a trTCM traffic conditioner (8W/32B)
 * See RFC 2698 for more details.
 * ----------------------------------------------------------------
 *    3                   2                   1
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |             Reserved          |p|         Reserved            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |          Port Ingress                                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |        Token Bucket Peak                                      |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |     Token Bucket Committed                                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |         Peak Burst Size                                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |        Committed Burst Size                                   |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |           Peak Information Rate                               |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |    Committed Information Rate                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * Word[0](Flag options):
 * [15] p(pps) 1 for pps, 0 for bps
 *
 * Meter control message
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-------------------------------+-+---+-----+-+---------+-+---+-+
 * |            Reserved           |p| Y |TYPE |E|TSHFV    |P| PC|R|
 * +-------------------------------+-+---+-----+-+---------+-+---+-+
 * |                           meter ID                            |
 * +-------------------------------+-------------------------------+
 *
 */
struct nfp_police_config {
	struct nfp_police_cfg_head head;
	__be32 bkt_tkn_p;
	__be32 bkt_tkn_c;
	__be32 pbs;
	__be32 cbs;
	__be32 pir;
	__be32 cir;
};

struct nfp_police_stats_reply {
	struct nfp_police_cfg_head head;
	__be64 pass_bytes;
	__be64 pass_pkts;
	__be64 drop_bytes;
	__be64 drop_pkts;
};

int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress,
				  bool pps, u32 id, u32 rate, u32 burst)
{
	struct nfp_police_config *config;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	if (pps)
		config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS);
	if (!ingress)
		config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_METER);

	if (ingress)
		config->head.port = cpu_to_be32(id);
	else
		config->head.meter_id = cpu_to_be32(id);

	config->bkt_tkn_p = cpu_to_be32(burst);
	config->bkt_tkn_c = cpu_to_be32(burst);
	config->pbs = cpu_to_be32(burst);
	config->cbs = cpu_to_be32(burst);
	config->pir = cpu_to_be32(rate);
	config->cir = cpu_to_be32(rate);
	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}
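/* Validate a single police action for offload: the exceed action must be
 * drop, the conform action must be continue/ok on ingress (or pipe/ok for
 * standalone meters), an "ok" conform action must be the last entry, and
 * peakrate/avrate/overhead are not supported.
 */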
static int nfp_policer_validate(const struct flow_action *action,
				const struct flow_action_entry *act,
				struct netlink_ext_ack *extack,
				bool ingress)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (ingress) {
		if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE &&
		    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Offload not supported when conform action is not continue or ok");
			return -EOPNOTSUPP;
		}
	} else {
		if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
		    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Offload not supported when conform action is not pipe or ok");
			return -EOPNOTSUPP;
		}
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}
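/* Offload a matchall police rule as an ingress rate limiter on a VF
 * representor. At most one BPS policer and, when the firmware advertises
 * NFP_FL_FEATS_QOS_PPS, one PPS policer are accepted; stats polling is
 * started when the first rate limiter is installed.
 */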
static int
nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
				struct tc_cls_matchall_offload *flow,
				struct netlink_ext_ack *extack)
{
	struct flow_action_entry *paction = &flow->rule->action.entries[0];
	u32 action_num = flow->rule->action.num_entries;
	struct nfp_flower_priv *fl_priv = app->priv;
	struct flow_action_entry *action = NULL;
	struct nfp_flower_repr_priv *repr_priv;
	u32 netdev_port_id, i;
	struct nfp_repr *repr;
	bool pps_support;
	u32 bps_num = 0;
	u32 pps_num = 0;
	u32 burst;
	bool pps;
	u64 rate;
	int err;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);
	repr_priv = repr->app_priv;
	netdev_port_id = nfp_repr_get_port_id(netdev);
	pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);

	if (repr_priv->block_shared) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
		return -EOPNOTSUPP;
	}

	if (repr->port->type != NFP_PORT_VF_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
		return -EOPNOTSUPP;
	}

	if (pps_support) {
		if (action_num > 2 || action_num == 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload only supports 1 or 2 actions");
			return -EOPNOTSUPP;
		}
	} else {
		if (!flow_offload_has_one_action(&flow->rule->action)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload requires a single action");
			return -EOPNOTSUPP;
		}
	}

	if (flow->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
		return -EOPNOTSUPP;
	}

	for (i = 0; i < action_num; i++) {
		action = paction + i;
		if (action->id != FLOW_ACTION_POLICE) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload requires police action");
			return -EOPNOTSUPP;
		}

		err = nfp_policer_validate(&flow->rule->action, action, extack, true);
		if (err)
			return err;

		if (action->police.rate_bytes_ps > 0) {
			if (bps_num++) {
				NL_SET_ERR_MSG_MOD(extack,
						   "unsupported offload: qos rate limit offload only supports one BPS action");
				return -EOPNOTSUPP;
			}
		}
		if (action->police.rate_pkt_ps > 0) {
			if (!pps_support) {
				NL_SET_ERR_MSG_MOD(extack,
						   "unsupported offload: FW does not support PPS action");
				return -EOPNOTSUPP;
			}
			if (pps_num++) {
				NL_SET_ERR_MSG_MOD(extack,
						   "unsupported offload: qos rate limit offload only supports one PPS action");
				return -EOPNOTSUPP;
			}
		}
	}

	for (i = 0; i < action_num; i++) {
		/* Set QoS data for this interface */
		action = paction + i;
		if (action->police.rate_bytes_ps > 0) {
			rate = action->police.rate_bytes_ps;
			burst = action->police.burst;
		} else if (action->police.rate_pkt_ps > 0) {
			rate = action->police.rate_pkt_ps;
			burst = action->police.burst_pkt;
		} else {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit is not BPS or PPS");
			continue;
		}

		if (rate != 0) {
			pps = false;
			if (action->police.rate_pkt_ps > 0)
				pps = true;
			nfp_flower_offload_one_police(repr->app, true,
						      pps, netdev_port_id,
						      rate, burst);
		}
	}
	repr_priv->qos_table.netdev_port_id = netdev_port_id;
	fl_priv->qos_rate_limiters++;
	if (fl_priv->qos_rate_limiters == 1)
		schedule_delayed_work(&fl_priv->qos_stats_work,
				      NFP_FL_QOS_UPDATE);

	return 0;
}
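/* Remove the ingress rate limiter for a VF representor: clear the cached
 * QoS entry, stop stats polling when the last limiter goes away, and send
 * a QOS_DEL control message for each supported type (BPS and, if
 * available, PPS).
 */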
static int
nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
			       struct tc_cls_matchall_offload *flow,
			       struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_config *config;
	u32 netdev_port_id, i;
	struct nfp_repr *repr;
	struct sk_buff *skb;
	bool pps_support;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	netdev_port_id = nfp_repr_get_port_id(netdev);
	repr_priv = repr->app_priv;
	pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);

	if (!repr_priv->qos_table.netdev_port_id) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
		return -EOPNOTSUPP;
	}

	memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
	fl_priv->qos_rate_limiters--;
	if (!fl_priv->qos_rate_limiters)
		cancel_delayed_work_sync(&fl_priv->qos_stats_work);
	for (i = 0; i < NFP_FL_QOS_TYPE_MAX; i++) {
		if (i == NFP_FL_QOS_TYPE_PPS && !pps_support)
			break;
		/* 0:bps 1:pps
		 * Clear QoS data for this interface.
		 * There is no need to check if a specific QOS_TYPE was
		 * configured as the firmware handles clearing a QoS entry
		 * safely, even if it wasn't explicitly added.
		 */
		skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
					    NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;

		config = nfp_flower_cmsg_get_data(skb);
		memset(config, 0, sizeof(struct nfp_police_config));
		if (i == NFP_FL_QOS_TYPE_PPS)
			config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS);
		config->head.port = cpu_to_be32(netdev_port_id);
		nfp_ctrl_tx(repr->app->ctrl, skb);
	}

	return 0;
}

void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_stats_reply *msg;
	struct nfp_stat_pair *curr_stats;
	struct nfp_stat_pair *prev_stats;
	struct net_device *netdev;
	struct nfp_repr *repr;
	u32 netdev_port_id;

	msg = nfp_flower_cmsg_get_data(skb);
	if (be32_to_cpu(msg->head.flags_opts) & NFP_FL_QOS_METER)
		return nfp_act_stats_reply(app, msg);

	netdev_port_id = be32_to_cpu(msg->head.port);
	rcu_read_lock();
	netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
	if (!netdev)
		goto exit_unlock_rcu;

	repr = netdev_priv(netdev);
	repr_priv = repr->app_priv;
	curr_stats = &repr_priv->qos_table.curr_stats;
	prev_stats = &repr_priv->qos_table.prev_stats;

	spin_lock_bh(&fl_priv->qos_stats_lock);
	curr_stats->pkts = be64_to_cpu(msg->pass_pkts) +
			   be64_to_cpu(msg->drop_pkts);
	curr_stats->bytes = be64_to_cpu(msg->pass_bytes) +
			    be64_to_cpu(msg->drop_bytes);

	if (!repr_priv->qos_table.last_update) {
		prev_stats->pkts = curr_stats->pkts;
		prev_stats->bytes = curr_stats->bytes;
	}

	repr_priv->qos_table.last_update = jiffies;
	spin_unlock_bh(&fl_priv->qos_stats_lock);

exit_unlock_rcu:
	rcu_read_unlock();
}

static void
nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
			      u32 id, bool ingress)
{
	struct nfp_police_cfg_head *head;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(fl_priv->app,
				    sizeof(struct nfp_police_cfg_head),
				    NFP_FLOWER_CMSG_TYPE_QOS_STATS,
				    GFP_ATOMIC);
	if (!skb)
		return;
	head = nfp_flower_cmsg_get_data(skb);

	memset(head, 0, sizeof(struct nfp_police_cfg_head));
	if (ingress) {
		head->port = cpu_to_be32(id);
	} else {
		head->flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
		head->meter_id = cpu_to_be32(id);
	}

	nfp_ctrl_tx(fl_priv->app->ctrl, skb);
}

static void
nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
{
	struct nfp_reprs *repr_set;
	int i;

	rcu_read_lock();
	repr_set = rcu_dereference(fl_priv->app->reprs[NFP_REPR_TYPE_VF]);
	if (!repr_set)
		goto exit_unlock_rcu;

	for (i = 0; i < repr_set->num_reprs; i++) {
		struct net_device *netdev;

		netdev = rcu_dereference(repr_set->reprs[i]);
		if (netdev) {
			struct nfp_repr *priv = netdev_priv(netdev);
			struct nfp_flower_repr_priv *repr_priv;
			u32 netdev_port_id;

			repr_priv = priv->app_priv;
			netdev_port_id = repr_priv->qos_table.netdev_port_id;
			if (!netdev_port_id)
				continue;

			nfp_flower_stats_rlim_request(fl_priv,
						      netdev_port_id, true);
		}
	}

exit_unlock_rcu:
	rcu_read_unlock();
}
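/* Delayed work that periodically requests fresh counters for every
 * configured per-port rate limiter and every offloaded meter, then
 * reschedules itself.
 */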
static void update_stats_cache(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct nfp_flower_priv *fl_priv;

	delayed_work = to_delayed_work(work);
	fl_priv = container_of(delayed_work, struct nfp_flower_priv,
			       qos_stats_work);

	nfp_flower_stats_rlim_request_all(fl_priv);
	nfp_flower_stats_meter_request_all(fl_priv);

	schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
}

static int
nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
			      struct tc_cls_matchall_offload *flow,
			      struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_stat_pair *curr_stats;
	struct nfp_stat_pair *prev_stats;
	u64 diff_bytes, diff_pkts;
	struct nfp_repr *repr;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	repr_priv = repr->app_priv;
	if (!repr_priv->qos_table.netdev_port_id) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot find qos entry for stats update");
		return -EOPNOTSUPP;
	}

	spin_lock_bh(&fl_priv->qos_stats_lock);
	curr_stats = &repr_priv->qos_table.curr_stats;
	prev_stats = &repr_priv->qos_table.prev_stats;
	diff_pkts = curr_stats->pkts - prev_stats->pkts;
	diff_bytes = curr_stats->bytes - prev_stats->bytes;
	prev_stats->pkts = curr_stats->pkts;
	prev_stats->bytes = curr_stats->bytes;
	spin_unlock_bh(&fl_priv->qos_stats_lock);

	flow_stats_update(&flow->stats, diff_bytes, diff_pkts, 0,
			  repr_priv->qos_table.last_update,
			  FLOW_ACTION_HW_STATS_DELAYED);
	return 0;
}

void nfp_flower_qos_init(struct nfp_app *app)
{
	struct nfp_flower_priv *fl_priv = app->priv;

	spin_lock_init(&fl_priv->qos_stats_lock);
	mutex_init(&fl_priv->meter_stats_lock);
	nfp_init_meter_table(app);

	INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
}

void nfp_flower_qos_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *fl_priv = app->priv;

	cancel_delayed_work_sync(&fl_priv->qos_stats_work);
}

int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
				 struct tc_cls_matchall_offload *flow)
{
	struct netlink_ext_ack *extack = flow->common.extack;
	struct nfp_flower_priv *fl_priv = app->priv;

	if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
		return -EOPNOTSUPP;
	}

	switch (flow->command) {
	case TC_CLSMATCHALL_REPLACE:
		return nfp_flower_install_rate_limiter(app, netdev, flow,
						       extack);
	case TC_CLSMATCHALL_DESTROY:
		return nfp_flower_remove_rate_limiter(app, netdev, flow,
						      extack);
	case TC_CLSMATCHALL_STATS:
		return nfp_flower_stats_rate_limiter(app, netdev, flow,
						     extack);
	default:
		return -EOPNOTSUPP;
	}
}

/* Offload tc action, currently only for tc police */

static const struct rhashtable_params stats_meter_table_params = {
	.key_offset	= offsetof(struct nfp_meter_entry, meter_id),
	.head_offset	= offsetof(struct nfp_meter_entry, ht_node),
	.key_len	= sizeof(u32),
};

struct nfp_meter_entry *
nfp_flower_search_meter_entry(struct nfp_app *app, u32 meter_id)
{
	struct nfp_flower_priv *priv = app->priv;

	return rhashtable_lookup_fast(&priv->meter_table, &meter_id,
				      stats_meter_table_params);
}
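/* Look up a meter entry by ID, allocating and inserting a new one into the
 * meter table if it does not exist yet. Stats polling is started when the
 * first rate limiter or meter is added. Returns NULL on allocation or
 * insertion failure.
 */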
static struct nfp_meter_entry *
nfp_flower_add_meter_entry(struct nfp_app *app, u32 meter_id)
{
	struct nfp_meter_entry *meter_entry = NULL;
	struct nfp_flower_priv *priv = app->priv;

	meter_entry = rhashtable_lookup_fast(&priv->meter_table,
					     &meter_id,
					     stats_meter_table_params);
	if (meter_entry)
		return meter_entry;

	meter_entry = kzalloc(sizeof(*meter_entry), GFP_KERNEL);
	if (!meter_entry)
		return NULL;

	meter_entry->meter_id = meter_id;
	meter_entry->used = jiffies;
	if (rhashtable_insert_fast(&priv->meter_table, &meter_entry->ht_node,
				   stats_meter_table_params)) {
		kfree(meter_entry);
		return NULL;
	}

	priv->qos_rate_limiters++;
	if (priv->qos_rate_limiters == 1)
		schedule_delayed_work(&priv->qos_stats_work,
				      NFP_FL_QOS_UPDATE);

	return meter_entry;
}

static void nfp_flower_del_meter_entry(struct nfp_app *app, u32 meter_id)
{
	struct nfp_meter_entry *meter_entry = NULL;
	struct nfp_flower_priv *priv = app->priv;

	meter_entry = rhashtable_lookup_fast(&priv->meter_table, &meter_id,
					     stats_meter_table_params);
	if (!meter_entry)
		return;

	rhashtable_remove_fast(&priv->meter_table,
			       &meter_entry->ht_node,
			       stats_meter_table_params);
	kfree(meter_entry);
	priv->qos_rate_limiters--;
	if (!priv->qos_rate_limiters)
		cancel_delayed_work_sync(&priv->qos_stats_work);
}

int nfp_flower_setup_meter_entry(struct nfp_app *app,
				 const struct flow_action_entry *action,
				 enum nfp_meter_op op,
				 u32 meter_id)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_meter_entry *meter_entry = NULL;
	int err = 0;

	mutex_lock(&fl_priv->meter_stats_lock);

	switch (op) {
	case NFP_METER_DEL:
		nfp_flower_del_meter_entry(app, meter_id);
		goto exit_unlock;
	case NFP_METER_ADD:
		meter_entry = nfp_flower_add_meter_entry(app, meter_id);
		break;
	default:
		err = -EOPNOTSUPP;
		goto exit_unlock;
	}

	if (!meter_entry) {
		err = -ENOMEM;
		goto exit_unlock;
	}

	if (action->police.rate_bytes_ps > 0) {
		meter_entry->bps = true;
		meter_entry->rate = action->police.rate_bytes_ps;
		meter_entry->burst = action->police.burst;
	} else {
		meter_entry->bps = false;
		meter_entry->rate = action->police.rate_pkt_ps;
		meter_entry->burst = action->police.burst_pkt;
	}

exit_unlock:
	mutex_unlock(&fl_priv->meter_stats_lock);
	return err;
}

int nfp_init_meter_table(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	return rhashtable_init(&priv->meter_table, &stats_meter_table_params);
}

void
nfp_flower_stats_meter_request_all(struct nfp_flower_priv *fl_priv)
{
	struct nfp_meter_entry *meter_entry = NULL;
	struct rhashtable_iter iter;

	mutex_lock(&fl_priv->meter_stats_lock);
	rhashtable_walk_enter(&fl_priv->meter_table, &iter);
	rhashtable_walk_start(&iter);

	while ((meter_entry = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(meter_entry))
			continue;
		nfp_flower_stats_rlim_request(fl_priv,
					      meter_entry->meter_id, false);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
	mutex_unlock(&fl_priv->meter_stats_lock);
}
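/* Install standalone tc police actions as firmware meters: each police
 * action is validated, registered in the meter table keyed by its hw_index,
 * and configured in hardware via a QOS_MOD control message.
 */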
static int
nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
			struct netlink_ext_ack *extack)
{
	struct flow_action_entry *paction = &fl_act->action.entries[0];
	u32 action_num = fl_act->action.num_entries;
	struct nfp_flower_priv *fl_priv = app->priv;
	struct flow_action_entry *action = NULL;
	u32 burst, i, meter_id;
	bool pps_support, pps;
	bool add = false;
	u64 rate;
	int err;

	pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);

	for (i = 0; i < action_num; i++) {
		/* Set QoS association data for this interface */
		action = paction + i;
		if (action->id != FLOW_ACTION_POLICE) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload requires police action");
			continue;
		}

		err = nfp_policer_validate(&fl_act->action, action, extack, false);
		if (err)
			return err;

		if (action->police.rate_bytes_ps > 0) {
			rate = action->police.rate_bytes_ps;
			burst = action->police.burst;
		} else if (action->police.rate_pkt_ps > 0 && pps_support) {
			rate = action->police.rate_pkt_ps;
			burst = action->police.burst_pkt;
		} else {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: unsupported qos rate limit");
			continue;
		}

		if (rate != 0) {
			meter_id = action->hw_index;
			if (nfp_flower_setup_meter_entry(app, action, NFP_METER_ADD, meter_id))
				continue;

			pps = false;
			if (action->police.rate_pkt_ps > 0)
				pps = true;
			nfp_flower_offload_one_police(app, false, pps, meter_id,
						      rate, burst);
			add = true;
		}
	}

	return add ? 0 : -EOPNOTSUPP;
}

static int
nfp_act_remove_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
		       struct netlink_ext_ack *extack)
{
	struct nfp_meter_entry *meter_entry = NULL;
	struct nfp_police_config *config;
	struct sk_buff *skb;
	u32 meter_id;
	bool pps;

	/* Delete QoS association data for this interface */
	if (fl_act->id != FLOW_ACTION_POLICE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: qos rate limit offload requires police action");
		return -EOPNOTSUPP;
	}

	meter_id = fl_act->index;
	meter_entry = nfp_flower_search_meter_entry(app, meter_id);
	if (!meter_entry) {
		NL_SET_ERR_MSG_MOD(extack,
				   "no meter entry found when deleting the action index");
		return -ENOENT;
	}
	pps = !meter_entry->bps;

	skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
	config->head.meter_id = cpu_to_be32(meter_id);
	if (pps)
		config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS);

	nfp_ctrl_tx(app->ctrl, skb);
	nfp_flower_setup_meter_entry(app, NULL, NFP_METER_DEL, meter_id);

	return 0;
}
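/* Handle a meter stats reply control message: cache the current packet,
 * byte and drop counters, seeding the previous counters on the first
 * update so the next stats read reports deltas only.
 */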
void
nfp_act_stats_reply(struct nfp_app *app, void *pmsg)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_meter_entry *meter_entry = NULL;
	struct nfp_police_stats_reply *msg = pmsg;
	u32 meter_id;

	meter_id = be32_to_cpu(msg->head.meter_id);
	mutex_lock(&fl_priv->meter_stats_lock);

	meter_entry = nfp_flower_search_meter_entry(app, meter_id);
	if (!meter_entry)
		goto exit_unlock;

	meter_entry->stats.curr.pkts = be64_to_cpu(msg->pass_pkts) +
				       be64_to_cpu(msg->drop_pkts);
	meter_entry->stats.curr.bytes = be64_to_cpu(msg->pass_bytes) +
					be64_to_cpu(msg->drop_bytes);
	meter_entry->stats.curr.drops = be64_to_cpu(msg->drop_pkts);
	if (!meter_entry->stats.update) {
		meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts;
		meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes;
		meter_entry->stats.prev.drops = meter_entry->stats.curr.drops;
	}

	meter_entry->stats.update = jiffies;

exit_unlock:
	mutex_unlock(&fl_priv->meter_stats_lock);
}

static int
nfp_act_stats_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
		      struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_meter_entry *meter_entry = NULL;
	u64 diff_bytes, diff_pkts, diff_drops;
	int err = 0;

	if (fl_act->id != FLOW_ACTION_POLICE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: qos rate limit offload requires police action");
		return -EOPNOTSUPP;
	}

	mutex_lock(&fl_priv->meter_stats_lock);
	meter_entry = nfp_flower_search_meter_entry(app, fl_act->index);
	if (!meter_entry) {
		err = -ENOENT;
		goto exit_unlock;
	}
	diff_pkts = meter_entry->stats.curr.pkts > meter_entry->stats.prev.pkts ?
		    meter_entry->stats.curr.pkts - meter_entry->stats.prev.pkts : 0;
	diff_bytes = meter_entry->stats.curr.bytes > meter_entry->stats.prev.bytes ?
		     meter_entry->stats.curr.bytes - meter_entry->stats.prev.bytes : 0;
	diff_drops = meter_entry->stats.curr.drops > meter_entry->stats.prev.drops ?
		     meter_entry->stats.curr.drops - meter_entry->stats.prev.drops : 0;

	flow_stats_update(&fl_act->stats, diff_bytes, diff_pkts, diff_drops,
			  meter_entry->stats.update,
			  FLOW_ACTION_HW_STATS_DELAYED);

	meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts;
	meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes;
	meter_entry->stats.prev.drops = meter_entry->stats.curr.drops;

exit_unlock:
	mutex_unlock(&fl_priv->meter_stats_lock);
	return err;
}

int nfp_setup_tc_act_offload(struct nfp_app *app,
			     struct flow_offload_action *fl_act)
{
	struct netlink_ext_ack *extack = fl_act->extack;
	struct nfp_flower_priv *fl_priv = app->priv;

	if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER))
		return -EOPNOTSUPP;

	switch (fl_act->command) {
	case FLOW_ACT_REPLACE:
		return nfp_act_install_actions(app, fl_act, extack);
	case FLOW_ACT_DESTROY:
		return nfp_act_remove_actions(app, fl_act, extack);
	case FLOW_ACT_STATS:
		return nfp_act_stats_actions(app, fl_act, extack);
	default:
		return -EOPNOTSUPP;
	}
}