// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/math64.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_port.h"

#define NFP_FL_QOS_UPDATE		msecs_to_jiffies(1000)
#define NFP_FL_QOS_PPS			BIT(15)

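/* Common head shared by all police cmsgs: word 0 carries the flag
 * options (bit 15: 1 for pps, 0 for bps policing) and word 1 the
 * ingress port the policer applies to.  See the trTCM layout below.
 */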
struct nfp_police_cfg_head {
	__be32 flags_opts;
	__be32 port;
};

enum NFP_FL_QOS_TYPES {
	NFP_FL_QOS_TYPE_BPS,
	NFP_FL_QOS_TYPE_PPS,
	NFP_FL_QOS_TYPE_MAX,
};

/* Police cmsg for configuring a trTCM traffic conditioner (8W/32B)
 * See RFC 2698 for more details.
 * ----------------------------------------------------------------
 *    3                   2                   1
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |             Reserved          |p|         Reserved            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                          Port Ingress                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                        Token Bucket Peak                      |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Token Bucket Committed                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                         Peak Burst Size                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Committed Burst Size                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Peak Information Rate                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                    Committed Information Rate                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * Word[0] (Flag options):
 * [15] p(pps) 1 for pps, 0 for bps
 */
struct nfp_police_config {
	struct nfp_police_cfg_head head;
	__be32 bkt_tkn_p;
	__be32 bkt_tkn_c;
	__be32 pbs;
	__be32 cbs;
	__be32 pir;
	__be32 cir;
};

struct nfp_police_stats_reply {
	struct nfp_police_cfg_head head;
	__be64 pass_bytes;
	__be64 pass_pkts;
	__be64 drop_bytes;
	__be64 drop_pkts;
};

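/* nfp_flower_install_rate_limiter() - offload a matchall police rule.
 *
 * Only VF repr ports with a non-shared block are supported, the rule
 * must use the highest priority and may carry at most one BPS and one
 * PPS police action (PPS only when the firmware advertises
 * NFP_FL_FEATS_QOS_PPS).  Each accepted action is programmed as a
 * trTCM with peak and committed rate/burst set to the same value, so
 * it effectively acts as a single-rate policer.  Installing the first
 * limiter also schedules the periodic stats poll.
 */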
static int
nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
				struct tc_cls_matchall_offload *flow,
				struct netlink_ext_ack *extack)
{
	struct flow_action_entry *paction = &flow->rule->action.entries[0];
	u32 action_num = flow->rule->action.num_entries;
	struct nfp_flower_priv *fl_priv = app->priv;
	struct flow_action_entry *action = NULL;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_config *config;
	u32 netdev_port_id, i;
	struct nfp_repr *repr;
	struct sk_buff *skb;
	bool pps_support;
	u32 bps_num = 0;
	u32 pps_num = 0;
	u32 burst;
	u64 rate;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);
	repr_priv = repr->app_priv;
	netdev_port_id = nfp_repr_get_port_id(netdev);
	pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);

	if (repr_priv->block_shared) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
		return -EOPNOTSUPP;
	}

	if (repr->port->type != NFP_PORT_VF_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
		return -EOPNOTSUPP;
	}

	if (pps_support) {
		if (action_num > 2 || action_num == 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload only supports one or two actions");
			return -EOPNOTSUPP;
		}
	} else {
		if (!flow_offload_has_one_action(&flow->rule->action)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload requires a single action");
			return -EOPNOTSUPP;
		}
	}

	if (flow->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
		return -EOPNOTSUPP;
	}

	for (i = 0; i < action_num; i++) {
		action = paction + i;
		if (action->id != FLOW_ACTION_POLICE) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit offload requires police action");
			return -EOPNOTSUPP;
		}
		if (action->police.rate_bytes_ps > 0) {
			if (bps_num++) {
				NL_SET_ERR_MSG_MOD(extack,
						   "unsupported offload: qos rate limit offload only supports one BPS action");
				return -EOPNOTSUPP;
			}
		}
		if (action->police.rate_pkt_ps > 0) {
			if (!pps_support) {
				NL_SET_ERR_MSG_MOD(extack,
						   "unsupported offload: FW does not support PPS action");
				return -EOPNOTSUPP;
			}
			if (pps_num++) {
				NL_SET_ERR_MSG_MOD(extack,
						   "unsupported offload: qos rate limit offload only supports one PPS action");
				return -EOPNOTSUPP;
			}
		}
	}

	for (i = 0; i < action_num; i++) {
		/* Set QoS data for this interface */
		action = paction + i;
		if (action->police.rate_bytes_ps > 0) {
			rate = action->police.rate_bytes_ps;
			burst = action->police.burst;
		} else if (action->police.rate_pkt_ps > 0) {
			rate = action->police.rate_pkt_ps;
			burst = action->police.burst_pkt;
		} else {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: qos rate limit is not BPS or PPS");
			continue;
		}

		if (rate != 0) {
			skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
						    NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
			if (!skb)
				return -ENOMEM;

			config = nfp_flower_cmsg_get_data(skb);
			memset(config, 0, sizeof(struct nfp_police_config));
			if (action->police.rate_pkt_ps > 0)
				config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS);
			config->head.port = cpu_to_be32(netdev_port_id);
			config->bkt_tkn_p = cpu_to_be32(burst);
			config->bkt_tkn_c = cpu_to_be32(burst);
			config->pbs = cpu_to_be32(burst);
			config->cbs = cpu_to_be32(burst);
			config->pir = cpu_to_be32(rate);
			config->cir = cpu_to_be32(rate);
			nfp_ctrl_tx(repr->app->ctrl, skb);
		}
	}
	repr_priv->qos_table.netdev_port_id = netdev_port_id;
	fl_priv->qos_rate_limiters++;
	if (fl_priv->qos_rate_limiters == 1)
		schedule_delayed_work(&fl_priv->qos_stats_work,
				      NFP_FL_QOS_UPDATE);

	return 0;
}

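/* nfp_flower_remove_rate_limiter() - tear down an offloaded policer.
 *
 * Clears the cached QoS state for this repr, stops the stats poll when
 * the last limiter goes away and sends a QOS_DEL cmsg per supported
 * rate type (BPS, plus PPS when the firmware supports it).
 */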
static int
nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
			       struct tc_cls_matchall_offload *flow,
			       struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_config *config;
	u32 netdev_port_id, i;
	struct nfp_repr *repr;
	struct sk_buff *skb;
	bool pps_support;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	netdev_port_id = nfp_repr_get_port_id(netdev);
	repr_priv = repr->app_priv;
	pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);

	if (!repr_priv->qos_table.netdev_port_id) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
		return -EOPNOTSUPP;
	}

	memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
	fl_priv->qos_rate_limiters--;
	if (!fl_priv->qos_rate_limiters)
		cancel_delayed_work_sync(&fl_priv->qos_stats_work);
	for (i = 0; i < NFP_FL_QOS_TYPE_MAX; i++) {
		if (i == NFP_FL_QOS_TYPE_PPS && !pps_support)
			break;
		/* i == 0: bps, i == 1: pps.
		 * Clear QoS data for this interface.
		 * There is no need to check if a specific QOS_TYPE was
		 * configured as the firmware handles clearing a QoS entry
		 * safely, even if it wasn't explicitly added.
		 */
		skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
					    NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;

		config = nfp_flower_cmsg_get_data(skb);
		memset(config, 0, sizeof(struct nfp_police_config));
		if (i == NFP_FL_QOS_TYPE_PPS)
			config->head.flags_opts = cpu_to_be32(NFP_FL_QOS_PPS);
		config->head.port = cpu_to_be32(netdev_port_id);
		nfp_ctrl_tx(repr->app->ctrl, skb);
	}

	return 0;
}

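/* nfp_flower_stats_rlim_reply() - handle a QOS_STATS reply cmsg.
 *
 * Looks up the repr by ingress port id and caches the summed pass and
 * drop counters under qos_stats_lock.  The first reply also seeds the
 * previous snapshot so the first delta reported to TC starts at zero.
 */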
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_stats_reply *msg;
	struct nfp_stat_pair *curr_stats;
	struct nfp_stat_pair *prev_stats;
	struct net_device *netdev;
	struct nfp_repr *repr;
	u32 netdev_port_id;

	msg = nfp_flower_cmsg_get_data(skb);
	netdev_port_id = be32_to_cpu(msg->head.port);
	rcu_read_lock();
	netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
	if (!netdev)
		goto exit_unlock_rcu;

	repr = netdev_priv(netdev);
	repr_priv = repr->app_priv;
	curr_stats = &repr_priv->qos_table.curr_stats;
	prev_stats = &repr_priv->qos_table.prev_stats;

	spin_lock_bh(&fl_priv->qos_stats_lock);
	curr_stats->pkts = be64_to_cpu(msg->pass_pkts) +
			   be64_to_cpu(msg->drop_pkts);
	curr_stats->bytes = be64_to_cpu(msg->pass_bytes) +
			    be64_to_cpu(msg->drop_bytes);

	if (!repr_priv->qos_table.last_update) {
		prev_stats->pkts = curr_stats->pkts;
		prev_stats->bytes = curr_stats->bytes;
	}

	repr_priv->qos_table.last_update = jiffies;
	spin_unlock_bh(&fl_priv->qos_stats_lock);

exit_unlock_rcu:
	rcu_read_unlock();
}

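/* Ask the firmware for policer stats on a single ingress port. */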
static void
nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
			      u32 netdev_port_id)
{
	struct nfp_police_cfg_head *head;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(fl_priv->app,
				    sizeof(struct nfp_police_cfg_head),
				    NFP_FLOWER_CMSG_TYPE_QOS_STATS,
				    GFP_ATOMIC);
	if (!skb)
		return;

	head = nfp_flower_cmsg_get_data(skb);
	memset(head, 0, sizeof(struct nfp_police_cfg_head));
	head->port = cpu_to_be32(netdev_port_id);

	nfp_ctrl_tx(fl_priv->app->ctrl, skb);
}

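/* Request policer stats for every VF repr that currently has a rate
 * limiter configured.
 */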
static void
nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
{
	struct nfp_reprs *repr_set;
	int i;

	rcu_read_lock();
	repr_set = rcu_dereference(fl_priv->app->reprs[NFP_REPR_TYPE_VF]);
	if (!repr_set)
		goto exit_unlock_rcu;

	for (i = 0; i < repr_set->num_reprs; i++) {
		struct net_device *netdev;

		netdev = rcu_dereference(repr_set->reprs[i]);
		if (netdev) {
			struct nfp_repr *priv = netdev_priv(netdev);
			struct nfp_flower_repr_priv *repr_priv;
			u32 netdev_port_id;

			repr_priv = priv->app_priv;
			netdev_port_id = repr_priv->qos_table.netdev_port_id;
			if (!netdev_port_id)
				continue;

			nfp_flower_stats_rlim_request(fl_priv, netdev_port_id);
		}
	}

exit_unlock_rcu:
	rcu_read_unlock();
}

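/* Periodic worker: poll the firmware for policer stats and re-arm. */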
static void update_stats_cache(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct nfp_flower_priv *fl_priv;

	delayed_work = to_delayed_work(work);
	fl_priv = container_of(delayed_work, struct nfp_flower_priv,
			       qos_stats_work);

	nfp_flower_stats_rlim_request_all(fl_priv);
	schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
}

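/* nfp_flower_stats_rate_limiter() - report stats deltas to TC.
 *
 * Returns the bytes/packets accumulated since the last query and rolls
 * the previous snapshot forward under qos_stats_lock.
 */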
static int
nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
			      struct tc_cls_matchall_offload *flow,
			      struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_stat_pair *curr_stats;
	struct nfp_stat_pair *prev_stats;
	u64 diff_bytes, diff_pkts;
	struct nfp_repr *repr;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	repr_priv = repr->app_priv;
	if (!repr_priv->qos_table.netdev_port_id) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot find qos entry for stats update");
		return -EOPNOTSUPP;
	}

	spin_lock_bh(&fl_priv->qos_stats_lock);
	curr_stats = &repr_priv->qos_table.curr_stats;
	prev_stats = &repr_priv->qos_table.prev_stats;
	diff_pkts = curr_stats->pkts - prev_stats->pkts;
	diff_bytes = curr_stats->bytes - prev_stats->bytes;
	prev_stats->pkts = curr_stats->pkts;
	prev_stats->bytes = curr_stats->bytes;
	spin_unlock_bh(&fl_priv->qos_stats_lock);

	flow_stats_update(&flow->stats, diff_bytes, diff_pkts, 0,
			  repr_priv->qos_table.last_update,
			  FLOW_ACTION_HW_STATS_DELAYED);
	return 0;
}

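/* Initialise the QoS stats lock and the periodic stats worker. */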
void nfp_flower_qos_init(struct nfp_app *app)
{
	struct nfp_flower_priv *fl_priv = app->priv;

	spin_lock_init(&fl_priv->qos_stats_lock);
	INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
}

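/* Stop the periodic stats worker on app teardown. */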
void nfp_flower_qos_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *fl_priv = app->priv;

	cancel_delayed_work_sync(&fl_priv->qos_stats_work);
}

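/* Entry point for TC matchall (rate limiter) offload requests.
 *
 * As an illustration only (interface name is an example), a limiter
 * handled here is typically installed with a clsact/ingress qdisc on
 * the VF repr and a command along the lines of:
 *
 *   tc filter add dev $VF_REPR ingress protocol all prio 1 matchall \
 *           action police rate 200mbit burst 64k
 *
 * which requires firmware advertising NFP_FL_FEATS_VF_RLIM.
 */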
int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
				 struct tc_cls_matchall_offload *flow)
{
	struct netlink_ext_ack *extack = flow->common.extack;
	struct nfp_flower_priv *fl_priv = app->priv;

	if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
		return -EOPNOTSUPP;
	}

	switch (flow->command) {
	case TC_CLSMATCHALL_REPLACE:
		return nfp_flower_install_rate_limiter(app, netdev, flow,
						       extack);
	case TC_CLSMATCHALL_DESTROY:
		return nfp_flower_remove_rate_limiter(app, netdev, flow,
						      extack);
	case TC_CLSMATCHALL_STATS:
		return nfp_flower_stats_rate_limiter(app, netdev, flow,
						     extack);
	default:
		return -EOPNOTSUPP;
	}
}