// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications.  All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_matchall.h"
#include "sched.h"
#include "cxgb4_uld.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

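/* Validate an egress matchall offload request: it must carry exactly
 * one policing action, must not come from a shared block, must not
 * exceed the underlying link speed, and none of the port's queues may
 * already be bound to a non-channel-rate-limit scheduling class.
 */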
static int cxgb4_matchall_egress_validate(struct net_device *dev,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct port_info *pi = netdev2pinfo(dev);
	struct flow_action_entry *entry;
	struct ch_sched_queue qe;
	struct sched_class *e;
	u64 max_link_rate;
	u32 i, speed;
	int ret;

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload needs at least 1 policing action");
		return -EINVAL;
	} else if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload only supports 1 policing action");
		return -EINVAL;
	} else if (pi->tc_block_shared) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload not supported with shared blocks");
		return -EINVAL;
	}

	ret = t4_get_link_params(pi, NULL, &speed, NULL);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to get max speed supported by the link");
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	flow_action_for_each(i, entry, actions) {
		switch (entry->id) {
		case FLOW_ACTION_POLICE:
			if (entry->police.rate_pkt_ps) {
				NL_SET_ERR_MSG_MOD(extack,
						   "QoS offload doesn't support packets per second");
				return -EOPNOTSUPP;
			}
			/* Convert bytes per second to bits per second */
			if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Specified policing max rate is larger than underlying link speed");
				return -ERANGE;
			}
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only policing action supported with Egress MATCHALL offload");
			return -EOPNOTSUPP;
		}
	}

	for (i = 0; i < pi->nqsets; i++) {
		memset(&qe, 0, sizeof(qe));
		qe.queue = i;

		e = cxgb4_sched_queue_lookup(dev, &qe);
		if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Some queues are already bound to a different class");
			return -EBUSY;
		}
	}

	return 0;
}

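/* Bind all queue sets on the port to scheduling class @tc. If any
 * bind fails, unwind the bindings done so far and return the error.
 */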
static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	int ret;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = tc;
		ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	while (i--) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}

	return ret;
}

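/* Unbind all queue sets on the port from their scheduling class. */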
static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}
}

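/* Allocate a channel rate-limit scheduling class matching the policing
 * action's max rate and bind all queue sets on the port to it.
 */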
static int cxgb4_matchall_alloc_tc(struct net_device *dev,
				   struct tc_cls_matchall_offload *cls)
{
	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params.level = SCHED_CLASS_LEVEL_CH_RL,
		.u.params.mode = SCHED_CLASS_MODE_CLASS,
		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
		.u.params.class = SCHED_CLS_NONE,
		.u.params.minrate = 0,
		.u.params.weight = 0,
		.u.params.pktsize = dev->mtu,
	};
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *entry;
	struct sched_class *e;
	int ret;
	u32 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	flow_action_for_each(i, entry, &cls->rule->action)
		if (entry->id == FLOW_ACTION_POLICE)
			break;
	if (entry->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload doesn't support packets per second");
		return -EOPNOTSUPP;
	}
	/* Convert from bytes per second to Kbps */
	p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
	p.u.params.channel = pi->tx_chan;
	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free traffic class available for policing action");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Could not bind queues to traffic class");
		goto out_free;
	}

	tc_port_matchall->egress.hwtc = e->idx;
	tc_port_matchall->egress.cookie = cls->cookie;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;

out_free:
	cxgb4_sched_class_free(dev, e->idx);
	return ret;
}

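/* Unbind the port's queue sets and free the scheduling class that was
 * allocated for the egress matchall rule.
 */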
static void cxgb4_matchall_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	cxgb4_matchall_tc_unbind_queues(dev);
	cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);

	tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
	tc_port_matchall->egress.cookie = 0;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
}

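/* Allocate a mirror Virtual Interface (VI) for the port if the rule
 * contains a mirred action, so that matching ingress traffic can be
 * replicated to the mirror VI.
 */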
static int cxgb4_matchall_mirror_alloc(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *act;
	int ret;
	u32 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	flow_action_for_each(i, act, &cls->rule->action) {
		if (act->id == FLOW_ACTION_MIRRED) {
			ret = cxgb4_port_mirror_alloc(dev);
			if (ret) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Couldn't allocate mirror");
				return ret;
			}

			tc_port_matchall->ingress.viid_mirror = pi->viid_mirror;
			break;
		}
	}

	return 0;
}

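/* Release the mirror VI, if one was allocated for this port. */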
static void cxgb4_matchall_mirror_free(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (!tc_port_matchall->ingress.viid_mirror)
		return;

	cxgb4_port_mirror_free(dev);
	tc_port_matchall->ingress.viid_mirror = 0;
}

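/* Delete the ingress matchall filter of the given type (IPv4 or IPv6)
 * from hardware and clear the cached filter TID.
 */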
static int cxgb4_matchall_del_filter(struct net_device *dev, u8 filter_type)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid[filter_type],
			       &tc_port_matchall->ingress.fs[filter_type]);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid[filter_type] = 0;
	return 0;
}

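/* Insert a wildcard filter of the given type (IPv4 or IPv6) that
 * matches all ingress traffic destined to this port's PF/VF and
 * applies the rule's actions to it.
 */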
static int cxgb4_matchall_add_filter(struct net_device *dev,
				     struct tc_cls_matchall_offload *cls,
				     u8 filter_type)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct ch_filter_specification *fs;
	int ret, fidx;

	/* Get a free filter entry TID, where we can insert this new
	 * rule. Only insert the rule if its prio doesn't conflict
	 * with existing rules.
	 */
	fidx = cxgb4_get_free_ftid(dev, filter_type ? PF_INET6 : PF_INET,
				   false, cls->common.prio);
	if (fidx < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	fs = &tc_port_matchall->ingress.fs[filter_type];
	memset(fs, 0, sizeof(*fs));

	if (fidx < adap->tids.nhpftids)
		fs->prio = 1;
	fs->tc_prio = cls->common.prio;
	fs->tc_cookie = cls->cookie;
	fs->type = filter_type;
	fs->hitcnts = 1;

	fs->val.pfvf_vld = 1;
	fs->val.pf = adap->pf;
	fs->val.vf = pi->vin;

	cxgb4_process_flow_actions(dev, &cls->rule->action, fs);

	ret = cxgb4_set_filter(dev, fidx, fs);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid[filter_type] = fidx;
	return 0;
}

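/* Offload an ingress matchall rule: allocate the mirror VI if needed
 * and insert one wildcard filter per filter type (IPv4 and IPv6),
 * unwinding everything on failure.
 */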
static int cxgb4_matchall_alloc_filter(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret, i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	ret = cxgb4_matchall_mirror_alloc(dev, cls);
	if (ret)
		return ret;

	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_matchall_add_filter(dev, cls, i);
		if (ret)
			goto out_free;
	}

	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;

out_free:
	while (i-- > 0)
		cxgb4_matchall_del_filter(dev, i);

	cxgb4_matchall_mirror_free(dev);
	return ret;
}

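/* Remove all ingress matchall filters, release the mirror VI, and
 * reset the cached statistics.
 */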
static int cxgb4_matchall_free_filter(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;
	u8 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_matchall_del_filter(dev, i);
		if (ret)
			return ret;
	}

	cxgb4_matchall_mirror_free(dev);

	tc_port_matchall->ingress.packets = 0;
	tc_port_matchall->ingress.bytes = 0;
	tc_port_matchall->ingress.last_used = 0;
	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
	return 0;
}

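/* Add a matchall rule in the requested direction. Only one ingress
 * and one egress matchall rule can be offloaded per port at a time.
 */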
int cxgb4_tc_matchall_replace(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct netlink_ext_ack *extack = cls_matchall->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		if (tc_port_matchall->ingress.state ==
		    CXGB4_MATCHALL_STATE_ENABLED) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only 1 Ingress MATCHALL can be offloaded");
			return -ENOMEM;
		}

		ret = cxgb4_validate_flow_actions(dev,
						  &cls_matchall->rule->action,
						  extack, 1);
		if (ret)
			return ret;

		return cxgb4_matchall_alloc_filter(dev, cls_matchall);
	}

	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 Egress MATCHALL can be offloaded");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
	if (ret)
		return ret;

	return cxgb4_matchall_alloc_tc(dev, cls_matchall);
}

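/* Tear down the ingress or egress matchall rule identified by the
 * rule's cookie.
 */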
int cxgb4_tc_matchall_destroy(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		/* All the filter types of this matchall rule share the
		 * same cookie. So, checking for the first one is
		 * enough.
		 */
		if (cls_matchall->cookie !=
		    tc_port_matchall->ingress.fs[0].tc_cookie)
			return -ENOENT;

		return cxgb4_matchall_free_filter(dev);
	}

	if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
		return -ENOENT;

	cxgb4_matchall_free_tc(dev);
	return 0;
}

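/* Fetch the hardware hit counters of the ingress matchall filters and
 * report the delta since the last query to the TC core.
 */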
int cxgb4_tc_matchall_stats(struct net_device *dev,
			    struct tc_cls_matchall_offload *cls_matchall)
{
	u64 tmp_packets, tmp_bytes, packets = 0, bytes = 0;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct cxgb4_matchall_ingress_entry *ingress;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;
	u8 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
		return -ENOENT;

	ingress = &tc_port_matchall->ingress;
	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_get_filter_counters(dev, ingress->tid[i],
						&tmp_packets, &tmp_bytes,
						ingress->fs[i].hash);
		if (ret)
			return ret;

		packets += tmp_packets;
		bytes += tmp_bytes;
	}

	if (tc_port_matchall->ingress.packets != packets) {
		flow_stats_update(&cls_matchall->stats,
				  bytes - tc_port_matchall->ingress.bytes,
				  packets - tc_port_matchall->ingress.packets,
				  0, tc_port_matchall->ingress.last_used,
				  FLOW_ACTION_HW_STATS_IMMEDIATE);

		tc_port_matchall->ingress.packets = packets;
		tc_port_matchall->ingress.bytes = bytes;
		tc_port_matchall->ingress.last_used = jiffies;
	}

	return 0;
}

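/* Tear down any matchall offloads still active on the port. */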
static void cxgb4_matchall_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_tc(dev);

	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_filter(dev);
}

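/* Allocate the adapter-wide matchall state, with one entry per port. */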
int cxgb4_init_tc_matchall(struct adapter *adap)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct cxgb4_tc_matchall *tc_matchall;
	int ret;

	tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
	if (!tc_matchall)
		return -ENOMEM;

	tc_port_matchall = kcalloc(adap->params.nports,
				   sizeof(*tc_port_matchall),
				   GFP_KERNEL);
	if (!tc_port_matchall) {
		ret = -ENOMEM;
		goto out_free_matchall;
	}

	tc_matchall->port_matchall = tc_port_matchall;
	adap->tc_matchall = tc_matchall;
	return 0;

out_free_matchall:
	kfree(tc_matchall);
	return ret;
}

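/* Disable all active matchall offloads and free the adapter-wide
 * matchall state.
 */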
void cxgb4_cleanup_tc_matchall(struct adapter *adap)
{
	u8 i;

	if (adap->tc_matchall) {
		if (adap->tc_matchall->port_matchall) {
			for (i = 0; i < adap->params.nports; i++) {
				struct net_device *dev = adap->port[i];

				if (dev)
					cxgb4_matchall_disable_offload(dev);
			}
			kfree(adap->tc_matchall->port_matchall);
		}
		kfree(adap->tc_matchall);
	}
}