// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_port.h"

#define NFP_FL_QOS_UPDATE		msecs_to_jiffies(1000)
#define NFP_FL_QOS_PPS  BIT(15)
#define NFP_FL_QOS_METER  BIT(10)

struct nfp_police_cfg_head {
	__be32 flags_opts;
	union {
		__be32 meter_id;
		__be32 port;
	};
};

enum NFP_FL_QOS_TYPES {
	NFP_FL_QOS_TYPE_BPS,
	NFP_FL_QOS_TYPE_PPS,
	NFP_FL_QOS_TYPE_MAX,
};

/* Police cmsg for configuring a trTCM traffic conditioner (8W/32B)
 * See RFC 2698 for more details.
 * ----------------------------------------------------------------
 *    3                   2                   1
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |             Reserved          |p|         Reserved            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                          Port Ingress                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                        Token Bucket Peak                      |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Token Bucket Committed                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                         Peak Burst Size                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Committed Burst Size                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Peak Information Rate                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                    Committed Information Rate                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * Word[0] (Flag options):
 * [15] p(pps) 1 for pps, 0 for bps
 *
 * Meter control message
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-------------------------------+-+---+-----+-+---------+-+---+-+
 * |            Reserved           |p| Y |TYPE |E|TSHFV    |P| PC|R|
 * +-------------------------------+-+---+-----+-+---------+-+---+-+
 * |                            meter ID                           |
 * +-------------------------------+-------------------------------+
 *
 */
struct nfp_police_config {
	struct nfp_police_cfg_head head;
	__be32 bkt_tkn_p;
	__be32 bkt_tkn_c;
	__be32 pbs;
	__be32 cbs;
	__be32 pir;
	__be32 cir;
};

struct nfp_police_stats_reply {
	struct nfp_police_cfg_head head;
	__be64 pass_bytes;
	__be64 pass_pkts;
	__be64 drop_bytes;
	__be64 drop_pkts;
};

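/* Build an NFP_FLOWER_CMSG_TYPE_QOS_MOD message and send it over the control
 * channel. The same rate/burst pair is programmed into both the peak and
 * committed buckets (PIR == CIR, PBS == CBS), so the trTCM effectively
 * behaves as a single-rate policer. For ingress policers the head carries
 * the port ID; otherwise the METER flag is set and the meter ID is used.
 */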
int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress,
				  bool pps, u32 id, u32 rate, u32 burst)
{
	struct nfp_police_config *config;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	if (pps)
		config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS);
	if (!ingress)
		config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_METER);

	if (ingress)
		config->head.port = cpu_to_be32(id);
	else
		config->head.meter_id = cpu_to_be32(id);

	config->bkt_tkn_p = cpu_to_be32(burst);
	config->bkt_tkn_c = cpu_to_be32(burst);
	config->pbs = cpu_to_be32(burst);
	config->cbs = cpu_to_be32(burst);
	config->pir = cpu_to_be32(rate);
	config->cir = cpu_to_be32(rate);
	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}

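/* Validate a police action before offload: the exceed action must be drop,
 * the conform action must be continue, pipe or ok (and if ok, it must be
 * the last entry), and peakrate/avrate/overhead must not be set, since the
 * offload path does not express those parameters.
 */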
static int nfp_policer_validate(const struct flow_action *action,
				const struct flow_action_entry *act,
				struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE &&
	    act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not continue, pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}

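/* Install an ingress rate limiter (tc matchall + police) on a VF repr.
 * Rejects non-repr and non-VF ports, shared blocks, and rules that are not
 * at the highest priority, then checks that the rule carries at most one
 * BPS and one PPS police action (PPS only if the firmware advertises
 * NFP_FL_FEATS_QOS_PPS) before sending the config cmsg(s) and arming the
 * periodic stats work.
 */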
static int
nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
				struct tc_cls_matchall_offload *flow,
				struct netlink_ext_ack *extack)
{
	struct flow_action_entry *paction = &flow->rule->action.entries[0];
	u32 action_num = flow->rule->action.num_entries;
	struct nfp_flower_priv *fl_priv = app->priv;
	struct flow_action_entry *action = NULL;
	struct nfp_flower_repr_priv *repr_priv;
	u32 netdev_port_id, i;
	struct nfp_repr *repr;
	bool pps_support;
	u32 bps_num = 0;
	u32 pps_num = 0;
	u32 burst;
	bool pps;
	u64 rate;
	int err;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);
	repr_priv = repr->app_priv;
	netdev_port_id = nfp_repr_get_port_id(netdev);
	pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);

	if (repr_priv->block_shared) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
		return -EOPNOTSUPP;
	}

	if (repr->port->type != NFP_PORT_VF_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
		return -EOPNOTSUPP;
	}

	if (pps_support) {
		if (action_num > 2 || action_num == 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload only supports 1 or 2 actions");
			return -EOPNOTSUPP;
		}
	} else {
		if (!flow_offload_has_one_action(&flow->rule->action)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload requires a single action");
			return -EOPNOTSUPP;
		}
	}

	if (flow->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
		return -EOPNOTSUPP;
	}

	for (i = 0; i < action_num; i++) {
		action = paction + i;
		if (action->id != FLOW_ACTION_POLICE) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload requires police action");
			return -EOPNOTSUPP;
		}

		err = nfp_policer_validate(&flow->rule->action, action, extack);
		if (err)
			return err;

		if (action->police.rate_bytes_ps > 0) {
			if (bps_num++) {
				NL_SET_ERR_MSG_MOD(extack,
						   "unsupported offload: qos rate limit offload only supports one BPS action");
				return -EOPNOTSUPP;
			}
		}
		if (action->police.rate_pkt_ps > 0) {
			if (!pps_support) {
				NL_SET_ERR_MSG_MOD(extack,
						   "unsupported offload: FW does not support PPS action");
				return -EOPNOTSUPP;
			}
			if (pps_num++) {
				NL_SET_ERR_MSG_MOD(extack,
						   "unsupported offload: qos rate limit offload only supports one PPS action");
				return -EOPNOTSUPP;
			}
		}
	}

	for (i = 0; i < action_num; i++) {
		/* Set QoS data for this interface */
		action = paction + i;
		if (action->police.rate_bytes_ps > 0) {
			rate = action->police.rate_bytes_ps;
			burst = action->police.burst;
		} else if (action->police.rate_pkt_ps > 0) {
			rate = action->police.rate_pkt_ps;
			burst = action->police.burst_pkt;
		} else {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit is not BPS or PPS");
			continue;
		}

		if (rate != 0) {
			pps = false;
			if (action->police.rate_pkt_ps > 0)
				pps = true;
			nfp_flower_offload_one_police(repr->app, true,
						      pps, netdev_port_id,
						      rate, burst);
		}
	}
	repr_priv->qos_table.netdev_port_id = netdev_port_id;
	fl_priv->qos_rate_limiters++;
	if (fl_priv->qos_rate_limiters == 1)
		schedule_delayed_work(&fl_priv->qos_stats_work,
				      NFP_FL_QOS_UPDATE);

	return 0;
}

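/* Tear down the rate limiter previously installed on a VF repr: clear the
 * cached QoS state, stop the stats work if this was the last limiter, and
 * send a QOS_DEL cmsg for each supported type (BPS, and PPS when the
 * firmware supports it).
 */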
static int
nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
			       struct tc_cls_matchall_offload *flow,
			       struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_config *config;
	u32 netdev_port_id, i;
	struct nfp_repr *repr;
	struct sk_buff *skb;
	bool pps_support;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	netdev_port_id = nfp_repr_get_port_id(netdev);
	repr_priv = repr->app_priv;
	pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);

	if (!repr_priv->qos_table.netdev_port_id) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
		return -EOPNOTSUPP;
	}

	memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
	fl_priv->qos_rate_limiters--;
	if (!fl_priv->qos_rate_limiters)
		cancel_delayed_work_sync(&fl_priv->qos_stats_work);
	for (i = 0; i < NFP_FL_QOS_TYPE_MAX; i++) {
		if (i == NFP_FL_QOS_TYPE_PPS && !pps_support)
			break;
		/* 0:bps 1:pps
		 * Clear QoS data for this interface.
		 * There is no need to check if a specific QOS_TYPE was
		 * configured as the firmware handles clearing a QoS entry
		 * safely, even if it wasn't explicitly added.
		 */
		skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
					    NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;

		config = nfp_flower_cmsg_get_data(skb);
		memset(config, 0, sizeof(struct nfp_police_config));
		if (i == NFP_FL_QOS_TYPE_PPS)
			config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS);
		config->head.port = cpu_to_be32(netdev_port_id);
		nfp_ctrl_tx(repr->app->ctrl, skb);
	}

	return 0;
}

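/* Handle a QOS_STATS reply from the firmware. Meter (action) replies are
 * redirected to nfp_act_stats_reply(); port-based replies update the
 * cached curr_stats of the matching repr under qos_stats_lock.
 */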
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_stats_reply *msg;
	struct nfp_stat_pair *curr_stats;
	struct nfp_stat_pair *prev_stats;
	struct net_device *netdev;
	struct nfp_repr *repr;
	u32 netdev_port_id;

	msg = nfp_flower_cmsg_get_data(skb);
	if (be32_to_cpu(msg->head.flags_opts) & NFP_FL_QOS_METER)
		return nfp_act_stats_reply(app, msg);

	netdev_port_id = be32_to_cpu(msg->head.port);
	rcu_read_lock();
	netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
	if (!netdev)
		goto exit_unlock_rcu;

	repr = netdev_priv(netdev);
	repr_priv = repr->app_priv;
	curr_stats = &repr_priv->qos_table.curr_stats;
	prev_stats = &repr_priv->qos_table.prev_stats;

	spin_lock_bh(&fl_priv->qos_stats_lock);
	curr_stats->pkts = be64_to_cpu(msg->pass_pkts) +
			   be64_to_cpu(msg->drop_pkts);
	curr_stats->bytes = be64_to_cpu(msg->pass_bytes) +
			    be64_to_cpu(msg->drop_bytes);

	if (!repr_priv->qos_table.last_update) {
		prev_stats->pkts = curr_stats->pkts;
		prev_stats->bytes = curr_stats->bytes;
	}

	repr_priv->qos_table.last_update = jiffies;
	spin_unlock_bh(&fl_priv->qos_stats_lock);

exit_unlock_rcu:
	rcu_read_unlock();
}

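/* Request policer statistics from the firmware, keyed either by ingress
 * port ID or, when ingress is false, by meter ID with the METER flag set.
 */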
static void
nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
			      u32 id, bool ingress)
{
	struct nfp_police_cfg_head *head;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(fl_priv->app,
				    sizeof(struct nfp_police_cfg_head),
				    NFP_FLOWER_CMSG_TYPE_QOS_STATS,
				    GFP_ATOMIC);
	if (!skb)
		return;
	head = nfp_flower_cmsg_get_data(skb);

	memset(head, 0, sizeof(struct nfp_police_cfg_head));
	if (ingress) {
		head->port = cpu_to_be32(id);
	} else {
		head->flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
		head->meter_id = cpu_to_be32(id);
	}

	nfp_ctrl_tx(fl_priv->app->ctrl, skb);
}

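/* Walk all VF representors under RCU and request stats for every repr that
 * has a rate limiter configured.
 */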
static void
nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
{
	struct nfp_reprs *repr_set;
	int i;

	rcu_read_lock();
	repr_set = rcu_dereference(fl_priv->app->reprs[NFP_REPR_TYPE_VF]);
	if (!repr_set)
		goto exit_unlock_rcu;

	for (i = 0; i < repr_set->num_reprs; i++) {
		struct net_device *netdev;

		netdev = rcu_dereference(repr_set->reprs[i]);
		if (netdev) {
			struct nfp_repr *priv = netdev_priv(netdev);
			struct nfp_flower_repr_priv *repr_priv;
			u32 netdev_port_id;

			repr_priv = priv->app_priv;
			netdev_port_id = repr_priv->qos_table.netdev_port_id;
			if (!netdev_port_id)
				continue;

			nfp_flower_stats_rlim_request(fl_priv,
						      netdev_port_id, true);
		}
	}

exit_unlock_rcu:
	rcu_read_unlock();
}

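/* Periodic worker: poll the firmware for both per-port and per-meter
 * policer statistics, then re-arm itself at the NFP_FL_QOS_UPDATE interval.
 */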
static void update_stats_cache(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct nfp_flower_priv *fl_priv;

	delayed_work = to_delayed_work(work);
	fl_priv = container_of(delayed_work, struct nfp_flower_priv,
			       qos_stats_work);

	nfp_flower_stats_rlim_request_all(fl_priv);
	nfp_flower_stats_meter_request_all(fl_priv);

	schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
}

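/* Report rate limiter statistics to TC. The delta against prev_stats is
 * computed under qos_stats_lock and pushed via flow_stats_update() with
 * delayed HW stats semantics.
 */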
static int
nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
			      struct tc_cls_matchall_offload *flow,
			      struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_stat_pair *curr_stats;
	struct nfp_stat_pair *prev_stats;
	u64 diff_bytes, diff_pkts;
	struct nfp_repr *repr;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	repr_priv = repr->app_priv;
	if (!repr_priv->qos_table.netdev_port_id) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot find qos entry for stats update");
		return -EOPNOTSUPP;
	}

	spin_lock_bh(&fl_priv->qos_stats_lock);
	curr_stats = &repr_priv->qos_table.curr_stats;
	prev_stats = &repr_priv->qos_table.prev_stats;
	diff_pkts = curr_stats->pkts - prev_stats->pkts;
	diff_bytes = curr_stats->bytes - prev_stats->bytes;
	prev_stats->pkts = curr_stats->pkts;
	prev_stats->bytes = curr_stats->bytes;
	spin_unlock_bh(&fl_priv->qos_stats_lock);

	flow_stats_update(&flow->stats, diff_bytes, diff_pkts, 0,
			  repr_priv->qos_table.last_update,
			  FLOW_ACTION_HW_STATS_DELAYED);
	return 0;
}

void nfp_flower_qos_init(struct nfp_app *app)
{
	struct nfp_flower_priv *fl_priv = app->priv;

	spin_lock_init(&fl_priv->qos_stats_lock);
	mutex_init(&fl_priv->meter_stats_lock);
	nfp_init_meter_table(app);

	INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
}

void nfp_flower_qos_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *fl_priv = app->priv;

	cancel_delayed_work_sync(&fl_priv->qos_stats_work);
}

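/* Entry point for tc matchall (qos) offload requests; dispatches replace,
 * destroy and stats commands when the firmware advertises VF rate limiting
 * support (NFP_FL_FEATS_VF_RLIM).
 */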
int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
				 struct tc_cls_matchall_offload *flow)
{
	struct netlink_ext_ack *extack = flow->common.extack;
	struct nfp_flower_priv *fl_priv = app->priv;

	if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
		return -EOPNOTSUPP;
	}

	switch (flow->command) {
	case TC_CLSMATCHALL_REPLACE:
		return nfp_flower_install_rate_limiter(app, netdev, flow,
						       extack);
	case TC_CLSMATCHALL_DESTROY:
		return nfp_flower_remove_rate_limiter(app, netdev, flow,
						      extack);
	case TC_CLSMATCHALL_STATS:
		return nfp_flower_stats_rate_limiter(app, netdev, flow,
						     extack);
	default:
		return -EOPNOTSUPP;
	}
}

/* Offload tc action, currently only for tc police */

static const struct rhashtable_params stats_meter_table_params = {
	.key_offset	= offsetof(struct nfp_meter_entry, meter_id),
	.head_offset	= offsetof(struct nfp_meter_entry, ht_node),
	.key_len	= sizeof(u32),
};

struct nfp_meter_entry *
nfp_flower_search_meter_entry(struct nfp_app *app, u32 meter_id)
{
	struct nfp_flower_priv *priv = app->priv;

	return rhashtable_lookup_fast(&priv->meter_table, &meter_id,
				      stats_meter_table_params);
}

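/* Look up or allocate a meter table entry for the given meter ID. Bumping
 * qos_rate_limiters for a newly inserted entry also arms the periodic
 * stats work when it is the first limiter overall.
 */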
static struct nfp_meter_entry *
nfp_flower_add_meter_entry(struct nfp_app *app, u32 meter_id)
{
	struct nfp_meter_entry *meter_entry = NULL;
	struct nfp_flower_priv *priv = app->priv;

	meter_entry = rhashtable_lookup_fast(&priv->meter_table,
					     &meter_id,
					     stats_meter_table_params);
	if (meter_entry)
		return meter_entry;

	meter_entry = kzalloc(sizeof(*meter_entry), GFP_KERNEL);
	if (!meter_entry)
		return NULL;

	meter_entry->meter_id = meter_id;
	meter_entry->used = jiffies;
	if (rhashtable_insert_fast(&priv->meter_table, &meter_entry->ht_node,
				   stats_meter_table_params)) {
		kfree(meter_entry);
		return NULL;
	}

	priv->qos_rate_limiters++;
	if (priv->qos_rate_limiters == 1)
		schedule_delayed_work(&priv->qos_stats_work,
				      NFP_FL_QOS_UPDATE);

	return meter_entry;
}

static void nfp_flower_del_meter_entry(struct nfp_app *app, u32 meter_id)
{
	struct nfp_meter_entry *meter_entry = NULL;
	struct nfp_flower_priv *priv = app->priv;

	meter_entry = rhashtable_lookup_fast(&priv->meter_table, &meter_id,
					     stats_meter_table_params);
	if (!meter_entry)
		return;

	rhashtable_remove_fast(&priv->meter_table,
			       &meter_entry->ht_node,
			       stats_meter_table_params);
	kfree(meter_entry);
	priv->qos_rate_limiters--;
	if (!priv->qos_rate_limiters)
		cancel_delayed_work_sync(&priv->qos_stats_work);
}

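/* Add or delete a meter table entry under meter_stats_lock; on add, record
 * the requested rate/burst (BPS or PPS) for later use.
 */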
int nfp_flower_setup_meter_entry(struct nfp_app *app,
				 const struct flow_action_entry *action,
				 enum nfp_meter_op op,
				 u32 meter_id)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_meter_entry *meter_entry = NULL;
	int err = 0;

	mutex_lock(&fl_priv->meter_stats_lock);

	switch (op) {
	case NFP_METER_DEL:
		nfp_flower_del_meter_entry(app, meter_id);
		goto exit_unlock;
	case NFP_METER_ADD:
		meter_entry = nfp_flower_add_meter_entry(app, meter_id);
		break;
	default:
		err = -EOPNOTSUPP;
		goto exit_unlock;
	}

	if (!meter_entry) {
		err = -ENOMEM;
		goto exit_unlock;
	}

	if (action->police.rate_bytes_ps > 0) {
		meter_entry->bps = true;
		meter_entry->rate = action->police.rate_bytes_ps;
		meter_entry->burst = action->police.burst;
	} else {
		meter_entry->bps = false;
		meter_entry->rate = action->police.rate_pkt_ps;
		meter_entry->burst = action->police.burst_pkt;
	}

exit_unlock:
	mutex_unlock(&fl_priv->meter_stats_lock);
	return err;
}

int nfp_init_meter_table(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	return rhashtable_init(&priv->meter_table, &stats_meter_table_params);
}

void
nfp_flower_stats_meter_request_all(struct nfp_flower_priv *fl_priv)
{
	struct nfp_meter_entry *meter_entry = NULL;
	struct rhashtable_iter iter;

	mutex_lock(&fl_priv->meter_stats_lock);
	rhashtable_walk_enter(&fl_priv->meter_table, &iter);
	rhashtable_walk_start(&iter);

	while ((meter_entry = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(meter_entry))
			continue;
		nfp_flower_stats_rlim_request(fl_priv,
					      meter_entry->meter_id, false);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
	mutex_unlock(&fl_priv->meter_stats_lock);
}

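/* Offload standalone police actions (tc action path): for each supported
 * police entry, create a meter table entry keyed by the action hw_index
 * and push the policer config to the firmware as a meter (non-ingress)
 * policer. Returns -EOPNOTSUPP if nothing was offloaded.
 */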
static int
nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
			struct netlink_ext_ack *extack)
{
	struct flow_action_entry *paction = &fl_act->action.entries[0];
	u32 action_num = fl_act->action.num_entries;
	struct nfp_flower_priv *fl_priv = app->priv;
	struct flow_action_entry *action = NULL;
	u32 burst, i, meter_id;
	bool pps_support, pps;
	bool add = false;
	u64 rate;

	pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);

	for (i = 0; i < action_num; i++) {
		/* Set associated QoS data for this interface */
		action = paction + i;
		if (action->id != FLOW_ACTION_POLICE) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload requires police action");
			continue;
		}
		if (action->police.rate_bytes_ps > 0) {
			rate = action->police.rate_bytes_ps;
			burst = action->police.burst;
		} else if (action->police.rate_pkt_ps > 0 && pps_support) {
			rate = action->police.rate_pkt_ps;
			burst = action->police.burst_pkt;
		} else {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: unsupported qos rate limit");
			continue;
		}

		if (rate != 0) {
			meter_id = action->hw_index;
			if (nfp_flower_setup_meter_entry(app, action, NFP_METER_ADD, meter_id))
				continue;

			pps = false;
			if (action->police.rate_pkt_ps > 0)
				pps = true;
			nfp_flower_offload_one_police(app, false, pps, meter_id,
						      rate, burst);
			add = true;
		}
	}

	return add ? 0 : -EOPNOTSUPP;
}

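/* Remove a previously offloaded police action: look up the meter entry to
 * recover the BPS/PPS type, send a QOS_DEL cmsg carrying the meter ID, and
 * drop the entry from the meter table.
 */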
static int
nfp_act_remove_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
		       struct netlink_ext_ack *extack)
{
	struct nfp_meter_entry *meter_entry = NULL;
	struct nfp_police_config *config;
	struct sk_buff *skb;
	u32 meter_id;
	bool pps;

	/* Delete associated QoS data for this interface */
	if (fl_act->id != FLOW_ACTION_POLICE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: qos rate limit offload requires police action");
		return -EOPNOTSUPP;
	}

	meter_id = fl_act->index;
	meter_entry = nfp_flower_search_meter_entry(app, meter_id);
	if (!meter_entry) {
		NL_SET_ERR_MSG_MOD(extack,
				   "no meter entry found when deleting the action index");
		return -ENOENT;
	}
	pps = !meter_entry->bps;

	skb = nfp_flower_cmsg_alloc(app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_METER);
	config->head.meter_id = cpu_to_be32(meter_id);
	if (pps)
		config->head.flags_opts |= cpu_to_be32(NFP_FL_QOS_PPS);

	nfp_ctrl_tx(app->ctrl, skb);
	nfp_flower_setup_meter_entry(app, NULL, NFP_METER_DEL, meter_id);

	return 0;
}

void
nfp_act_stats_reply(struct nfp_app *app, void *pmsg)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_meter_entry *meter_entry = NULL;
	struct nfp_police_stats_reply *msg = pmsg;
	u32 meter_id;

	meter_id = be32_to_cpu(msg->head.meter_id);
	mutex_lock(&fl_priv->meter_stats_lock);

	meter_entry = nfp_flower_search_meter_entry(app, meter_id);
	if (!meter_entry)
		goto exit_unlock;

	meter_entry->stats.curr.pkts = be64_to_cpu(msg->pass_pkts) +
				       be64_to_cpu(msg->drop_pkts);
	meter_entry->stats.curr.bytes = be64_to_cpu(msg->pass_bytes) +
					be64_to_cpu(msg->drop_bytes);
	meter_entry->stats.curr.drops = be64_to_cpu(msg->drop_pkts);
	if (!meter_entry->stats.update) {
		meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts;
		meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes;
		meter_entry->stats.prev.drops = meter_entry->stats.curr.drops;
	}

	meter_entry->stats.update = jiffies;

exit_unlock:
	mutex_unlock(&fl_priv->meter_stats_lock);
}

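/* Report statistics for an offloaded police action from the cached meter
 * entry, updating prev stats so the next query reports deltas only.
 */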
static int
nfp_act_stats_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
		      struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_meter_entry *meter_entry = NULL;
	u64 diff_bytes, diff_pkts, diff_drops;
	int err = 0;

	if (fl_act->id != FLOW_ACTION_POLICE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: qos rate limit offload requires police action");
		return -EOPNOTSUPP;
	}

	mutex_lock(&fl_priv->meter_stats_lock);
	meter_entry = nfp_flower_search_meter_entry(app, fl_act->index);
	if (!meter_entry) {
		err = -ENOENT;
		goto exit_unlock;
	}
	diff_pkts = meter_entry->stats.curr.pkts > meter_entry->stats.prev.pkts ?
		    meter_entry->stats.curr.pkts - meter_entry->stats.prev.pkts : 0;
	diff_bytes = meter_entry->stats.curr.bytes > meter_entry->stats.prev.bytes ?
		     meter_entry->stats.curr.bytes - meter_entry->stats.prev.bytes : 0;
	diff_drops = meter_entry->stats.curr.drops > meter_entry->stats.prev.drops ?
		     meter_entry->stats.curr.drops - meter_entry->stats.prev.drops : 0;

	flow_stats_update(&fl_act->stats, diff_bytes, diff_pkts, diff_drops,
			  meter_entry->stats.update,
			  FLOW_ACTION_HW_STATS_DELAYED);

	meter_entry->stats.prev.pkts = meter_entry->stats.curr.pkts;
	meter_entry->stats.prev.bytes = meter_entry->stats.curr.bytes;
	meter_entry->stats.prev.drops = meter_entry->stats.curr.drops;

exit_unlock:
	mutex_unlock(&fl_priv->meter_stats_lock);
	return err;
}

int nfp_setup_tc_act_offload(struct nfp_app *app,
			     struct flow_offload_action *fl_act)
{
	struct netlink_ext_ack *extack = fl_act->extack;
	struct nfp_flower_priv *fl_priv = app->priv;

	if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER))
		return -EOPNOTSUPP;

	switch (fl_act->command) {
	case FLOW_ACT_REPLACE:
		return nfp_act_install_actions(app, fl_act, extack);
	case FLOW_ACT_DESTROY:
		return nfp_act_remove_actions(app, fl_act, extack);
	case FLOW_ACT_STATS:
		return nfp_act_stats_actions(app, fl_act, extack);
	default:
		return -EOPNOTSUPP;
	}
}