1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2019 Chelsio Communications.  All rights reserved. */
3 
4 #include "cxgb4.h"
5 #include "cxgb4_tc_matchall.h"
6 #include "sched.h"
7 #include "cxgb4_uld.h"
8 #include "cxgb4_filter.h"
9 #include "cxgb4_tc_flower.h"
10 
11 static int cxgb4_matchall_egress_validate(struct net_device *dev,
12 					  struct tc_cls_matchall_offload *cls)
13 {
14 	struct netlink_ext_ack *extack = cls->common.extack;
15 	struct flow_action *actions = &cls->rule->action;
16 	struct port_info *pi = netdev2pinfo(dev);
17 	struct flow_action_entry *entry;
18 	u64 max_link_rate;
19 	u32 i, speed;
20 	int ret;
21 
22 	if (!flow_action_has_entries(actions)) {
23 		NL_SET_ERR_MSG_MOD(extack,
24 				   "Egress MATCHALL offload needs at least 1 policing action");
25 		return -EINVAL;
26 	} else if (!flow_offload_has_one_action(actions)) {
27 		NL_SET_ERR_MSG_MOD(extack,
28 				   "Egress MATCHALL offload only supports 1 policing action");
29 		return -EINVAL;
30 	} else if (pi->tc_block_shared) {
31 		NL_SET_ERR_MSG_MOD(extack,
32 				   "Egress MATCHALL offload not supported with shared blocks");
33 		return -EINVAL;
34 	}
35 
36 	ret = t4_get_link_params(pi, NULL, &speed, NULL);
37 	if (ret) {
38 		NL_SET_ERR_MSG_MOD(extack,
39 				   "Failed to get max speed supported by the link");
40 		return -EINVAL;
41 	}
42 
43 	/* Convert from Mbps to bps */
44 	max_link_rate = (u64)speed * 1000 * 1000;
45 
46 	flow_action_for_each(i, entry, actions) {
47 		switch (entry->id) {
48 		case FLOW_ACTION_POLICE:
49 			/* Convert bytes per second to bits per second */
50 			if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
51 				NL_SET_ERR_MSG_MOD(extack,
52 						   "Specified policing max rate is larger than underlying link speed");
53 				return -ERANGE;
54 			}
55 			break;
56 		default:
57 			NL_SET_ERR_MSG_MOD(extack,
58 					   "Only policing action supported with Egress MATCHALL offload");
59 			return -EOPNOTSUPP;
60 		}
61 	}
62 
63 	return 0;
64 }
65 
66 static int cxgb4_matchall_alloc_tc(struct net_device *dev,
67 				   struct tc_cls_matchall_offload *cls)
68 {
69 	struct ch_sched_params p = {
70 		.type = SCHED_CLASS_TYPE_PACKET,
71 		.u.params.level = SCHED_CLASS_LEVEL_CH_RL,
72 		.u.params.mode = SCHED_CLASS_MODE_CLASS,
73 		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
74 		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
75 		.u.params.class = SCHED_CLS_NONE,
76 		.u.params.minrate = 0,
77 		.u.params.weight = 0,
78 		.u.params.pktsize = dev->mtu,
79 	};
80 	struct netlink_ext_ack *extack = cls->common.extack;
81 	struct cxgb4_tc_port_matchall *tc_port_matchall;
82 	struct port_info *pi = netdev2pinfo(dev);
83 	struct adapter *adap = netdev2adap(dev);
84 	struct flow_action_entry *entry;
85 	struct sched_class *e;
86 	u32 i;
87 
88 	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
89 
90 	flow_action_for_each(i, entry, &cls->rule->action)
91 		if (entry->id == FLOW_ACTION_POLICE)
92 			break;
93 
94 	/* Convert from bytes per second to Kbps */
95 	p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
96 	p.u.params.channel = pi->tx_chan;
97 	e = cxgb4_sched_class_alloc(dev, &p);
98 	if (!e) {
99 		NL_SET_ERR_MSG_MOD(extack,
100 				   "No free traffic class available for policing action");
101 		return -ENOMEM;
102 	}
103 
104 	tc_port_matchall->egress.hwtc = e->idx;
105 	tc_port_matchall->egress.cookie = cls->cookie;
106 	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
107 	return 0;
108 }
109 
110 static void cxgb4_matchall_free_tc(struct net_device *dev)
111 {
112 	struct cxgb4_tc_port_matchall *tc_port_matchall;
113 	struct port_info *pi = netdev2pinfo(dev);
114 	struct adapter *adap = netdev2adap(dev);
115 
116 	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
117 	cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
118 
119 	tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
120 	tc_port_matchall->egress.cookie = 0;
121 	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
122 }
123 
/* Install an ingress wildcard rule matching all traffic received on this
 * port's virtual interface (matches only on the VI's PF/VF identity).
 * Returns 0 on success or a negative errno.
 */
static int cxgb4_matchall_alloc_filter(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct ch_filter_specification *fs;
	int ret, fidx;

	/* Note that TC uses prio 0 to indicate stack to generate
	 * automatic prio and hence doesn't pass prio 0 to driver.
	 * However, the hardware TCAM index starts from 0. Hence, the
	 * -1 here. 1 slot is enough to create a wildcard matchall
	 * VIID rule.
	 */
	if (cls->common.prio <= adap->tids.nftids)
		fidx = cls->common.prio - 1;
	else
		fidx = cxgb4_get_free_ftid(dev, PF_INET);

	/* Only insert MATCHALL rule if its priority doesn't conflict
	 * with existing rules in the LETCAM.
	 */
	if (fidx < 0 ||
	    !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	fs = &tc_port_matchall->ingress.fs;
	memset(fs, 0, sizeof(*fs));

	/* NOTE(review): indices below nhpftids appear to fall in the
	 * high-priority filter region, selected via fs->prio — confirm
	 * against the filter core (cxgb4_filter.c).
	 */
	if (fidx < adap->tids.nhpftids)
		fs->prio = 1;
	fs->tc_prio = cls->common.prio;
	fs->tc_cookie = cls->cookie;
	/* Enable hit counters; cxgb4_tc_matchall_stats() reads them back */
	fs->hitcnts = 1;

	/* Wildcard match: only constrain on this VI's PF/VF identity */
	fs->val.pfvf_vld = 1;
	fs->val.pf = adap->pf;
	fs->val.vf = pi->vin;

	/* Translate the TC actions into filter actions (drop, redirect, ...) */
	cxgb4_process_flow_actions(dev, &cls->rule->action, fs);

	ret = cxgb4_set_filter(dev, fidx, fs);
	if (ret)
		return ret;

	/* Remember the tid so destroy/stats can reference the rule later */
	tc_port_matchall->ingress.tid = fidx;
	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;
}
179 
180 static int cxgb4_matchall_free_filter(struct net_device *dev)
181 {
182 	struct cxgb4_tc_port_matchall *tc_port_matchall;
183 	struct port_info *pi = netdev2pinfo(dev);
184 	struct adapter *adap = netdev2adap(dev);
185 	int ret;
186 
187 	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
188 
189 	ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid,
190 			       &tc_port_matchall->ingress.fs);
191 	if (ret)
192 		return ret;
193 
194 	tc_port_matchall->ingress.packets = 0;
195 	tc_port_matchall->ingress.bytes = 0;
196 	tc_port_matchall->ingress.last_used = 0;
197 	tc_port_matchall->ingress.tid = 0;
198 	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
199 	return 0;
200 }
201 
202 int cxgb4_tc_matchall_replace(struct net_device *dev,
203 			      struct tc_cls_matchall_offload *cls_matchall,
204 			      bool ingress)
205 {
206 	struct netlink_ext_ack *extack = cls_matchall->common.extack;
207 	struct cxgb4_tc_port_matchall *tc_port_matchall;
208 	struct port_info *pi = netdev2pinfo(dev);
209 	struct adapter *adap = netdev2adap(dev);
210 	int ret;
211 
212 	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
213 	if (ingress) {
214 		if (tc_port_matchall->ingress.state ==
215 		    CXGB4_MATCHALL_STATE_ENABLED) {
216 			NL_SET_ERR_MSG_MOD(extack,
217 					   "Only 1 Ingress MATCHALL can be offloaded");
218 			return -ENOMEM;
219 		}
220 
221 		ret = cxgb4_validate_flow_actions(dev,
222 						  &cls_matchall->rule->action);
223 		if (ret)
224 			return ret;
225 
226 		return cxgb4_matchall_alloc_filter(dev, cls_matchall);
227 	}
228 
229 	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
230 		NL_SET_ERR_MSG_MOD(extack,
231 				   "Only 1 Egress MATCHALL can be offloaded");
232 		return -ENOMEM;
233 	}
234 
235 	ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
236 	if (ret)
237 		return ret;
238 
239 	return cxgb4_matchall_alloc_tc(dev, cls_matchall);
240 }
241 
242 int cxgb4_tc_matchall_destroy(struct net_device *dev,
243 			      struct tc_cls_matchall_offload *cls_matchall,
244 			      bool ingress)
245 {
246 	struct cxgb4_tc_port_matchall *tc_port_matchall;
247 	struct port_info *pi = netdev2pinfo(dev);
248 	struct adapter *adap = netdev2adap(dev);
249 
250 	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
251 	if (ingress) {
252 		if (cls_matchall->cookie !=
253 		    tc_port_matchall->ingress.fs.tc_cookie)
254 			return -ENOENT;
255 
256 		return cxgb4_matchall_free_filter(dev);
257 	}
258 
259 	if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
260 		return -ENOENT;
261 
262 	cxgb4_matchall_free_tc(dev);
263 	return 0;
264 }
265 
/* Report ingress MATCHALL hit counters to TC.  Reads the absolute
 * packet/byte counters from hardware and pushes the delta since the last
 * readout into @cls_matchall->stats.  Returns 0 on success, -ENOENT if
 * no ingress rule is offloaded, or the errno from the counter read.
 */
int cxgb4_tc_matchall_stats(struct net_device *dev,
			    struct tc_cls_matchall_offload *cls_matchall)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u64 packets, bytes;
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
		return -ENOENT;

	/* Hardware reports absolute totals for the filter tid */
	ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid,
					&packets, &bytes,
					tc_port_matchall->ingress.fs.hash);
	if (ret)
		return ret;

	/* Only push an update (and refresh the cached snapshot) when the
	 * packet count actually moved since the last readout.
	 */
	if (tc_port_matchall->ingress.packets != packets) {
		flow_stats_update(&cls_matchall->stats,
				  bytes - tc_port_matchall->ingress.bytes,
				  packets - tc_port_matchall->ingress.packets,
				  tc_port_matchall->ingress.last_used);

		tc_port_matchall->ingress.packets = packets;
		tc_port_matchall->ingress.bytes = bytes;
		tc_port_matchall->ingress.last_used = jiffies;
	}

	return 0;
}
298 
299 static void cxgb4_matchall_disable_offload(struct net_device *dev)
300 {
301 	struct cxgb4_tc_port_matchall *tc_port_matchall;
302 	struct port_info *pi = netdev2pinfo(dev);
303 	struct adapter *adap = netdev2adap(dev);
304 
305 	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
306 	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
307 		cxgb4_matchall_free_tc(dev);
308 
309 	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
310 		cxgb4_matchall_free_filter(dev);
311 }
312 
313 int cxgb4_init_tc_matchall(struct adapter *adap)
314 {
315 	struct cxgb4_tc_port_matchall *tc_port_matchall;
316 	struct cxgb4_tc_matchall *tc_matchall;
317 	int ret;
318 
319 	tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
320 	if (!tc_matchall)
321 		return -ENOMEM;
322 
323 	tc_port_matchall = kcalloc(adap->params.nports,
324 				   sizeof(*tc_port_matchall),
325 				   GFP_KERNEL);
326 	if (!tc_port_matchall) {
327 		ret = -ENOMEM;
328 		goto out_free_matchall;
329 	}
330 
331 	tc_matchall->port_matchall = tc_port_matchall;
332 	adap->tc_matchall = tc_matchall;
333 	return 0;
334 
335 out_free_matchall:
336 	kfree(tc_matchall);
337 	return ret;
338 }
339 
340 void cxgb4_cleanup_tc_matchall(struct adapter *adap)
341 {
342 	u8 i;
343 
344 	if (adap->tc_matchall) {
345 		if (adap->tc_matchall->port_matchall) {
346 			for (i = 0; i < adap->params.nports; i++) {
347 				struct net_device *dev = adap->port[i];
348 
349 				if (dev)
350 					cxgb4_matchall_disable_offload(dev);
351 			}
352 			kfree(adap->tc_matchall->port_matchall);
353 		}
354 		kfree(adap->tc_matchall);
355 	}
356 }
357