// SPDX-License-Identifier: GPL-2.0
/* Copyright 2020, NXP Semiconductors
 */
#include "sja1105.h"
#include "sja1105_vl.h"

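/* Look up a previously offloaded rule by its tc cookie. Returns NULL if the
 * cookie is not known to this switch.
 */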
struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
				       unsigned long cookie)
{
	struct sja1105_rule *rule;

	list_for_each_entry(rule, &priv->flow_block.rules, list)
		if (rule->cookie == cookie)
			return rule;

	return NULL;
}

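/* Return the index of the first L2 policer not yet claimed by an offloaded
 * rule, or -1 if all policers are in use.
 */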
static int sja1105_find_free_l2_policer(struct sja1105_private *priv)
{
	int i;

	for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
		if (!priv->flow_block.l2_policer_used[i])
			return i;

	return -1;
}

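/* Install a policer for broadcast traffic received on @port, or extend an
 * existing one to this port. Rules that share the same tc cookie across
 * multiple ports share a single hardware policer, tracked through
 * rule->port_mask.
 */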
static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
				       struct netlink_ext_ack *extack,
				       unsigned long cookie, int port,
				       u64 rate_bytes_per_sec,
				       s64 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_BCAST_POLICER;
		rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
		rule->key.type = SJA1105_KEY_BCAST;
		new_rule = true;
	}

	if (rule->bcast_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

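	/* By default, the broadcast policing entry of a port points at the
	 * port's own policer (sharindx == port); the deletion path restores
	 * that default. Anything else means a broadcast policer has already
	 * been installed for this port.
	 */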
	if (policing[(SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port already has a broadcast policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the broadcast policers of all ports attached to this block
	 * point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + p;

		policing[bcast].sharindx = rule->bcast_pol.sharindx;
	}

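	/* RATE is programmed in the switch's own rate granularity (hence the
	 * 512 / 1000000 scaling of the byte rate), and SMAX (the burst size)
	 * is derived from the byte rate and the requested burst duration.
	 */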
	policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
							  512, 1000000);
	policing[rule->bcast_pol.sharindx].smax = div_u64(rate_bytes_per_sec *
							  PSCHED_NS2TICKS(burst),
							  PSCHED_TICKS_PER_SEC);
	/* TODO: support per-flow MTU */
	policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						    ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	if (rc == 0 && new_rule) {
		priv->flow_block.l2_policer_used[rule->bcast_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}

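/* Same as sja1105_setup_bcast_policer(), but for the per-{port, traffic class}
 * policing entries: rate-limit traffic tagged with VLAN PCP @tc on @port.
 */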
static int sja1105_setup_tc_policer(struct sja1105_private *priv,
				    struct netlink_ext_ack *extack,
				    unsigned long cookie, int port, int tc,
				    u64 rate_bytes_per_sec,
				    s64 burst)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct sja1105_l2_policing_entry *policing;
	bool new_rule = false;
	unsigned long p;
	int rc;

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_TC_POLICER;
		rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
		rule->key.type = SJA1105_KEY_TC;
		rule->key.tc.pcp = tc;
		new_rule = true;
	}

	if (rule->tc_pol.sharindx == -1) {
		NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
		rc = -ENOSPC;
		goto out;
	}

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	if (policing[(port * SJA1105_NUM_TC) + tc].sharindx != port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port-TC pair already has an L2 policer");
		rc = -EEXIST;
		goto out;
	}

	rule->port_mask |= BIT(port);

	/* Make the policers for traffic class @tc of all ports attached to
	 * this block point to the newly allocated policer
	 */
	for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
		int index = (p * SJA1105_NUM_TC) + tc;

		policing[index].sharindx = rule->tc_pol.sharindx;
	}

	policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
						       512, 1000000);
	policing[rule->tc_pol.sharindx].smax = div_u64(rate_bytes_per_sec *
						       PSCHED_NS2TICKS(burst),
						       PSCHED_TICKS_PER_SEC);
	/* TODO: support per-flow MTU */
	policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
						 ETH_FCS_LEN;

	rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);

out:
	if (rc == 0 && new_rule) {
		priv->flow_block.l2_policer_used[rule->tc_pol.sharindx] = true;
		list_add(&rule->list, &priv->flow_block.rules);
	} else if (new_rule) {
		kfree(rule);
	}

	return rc;
}

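/* Dispatch a tc-police action to the appropriate setup routine based on the
 * parsed key: port-wide broadcast policing or per-traffic-class policing.
 */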
static int sja1105_flower_policer(struct sja1105_private *priv, int port,
				  struct netlink_ext_ack *extack,
				  unsigned long cookie,
				  struct sja1105_key *key,
				  u64 rate_bytes_per_sec,
				  s64 burst)
{
	switch (key->type) {
	case SJA1105_KEY_BCAST:
		return sja1105_setup_bcast_policer(priv, extack, cookie, port,
						   rate_bytes_per_sec, burst);
	case SJA1105_KEY_TC:
		return sja1105_setup_tc_policer(priv, extack, cookie, port,
						key->tc.pcp, rate_bytes_per_sec,
						burst);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown keys for policing");
		return -EOPNOTSUPP;
	}
}

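/* Translate the flower match into one of the key types the switch can act on:
 * broadcast DMAC, VLAN PCP (traffic class), or a DMAC-based virtual link key
 * (VLAN-aware when VID and PCP are matched as well).
 */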
static int sja1105_flower_parse_key(struct sja1105_private *priv,
				    struct netlink_ext_ack *extack,
				    struct flow_cls_offload *cls,
				    struct sja1105_key *key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool is_bcast_dmac = false;
	u64 dmac = U64_MAX;
	u16 vid = U16_MAX;
	u16 pcp = U16_MAX;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on protocol not supported");
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		u8 bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		u8 null[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!ether_addr_equal_masked(match.key->src, null,
					     match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on source MAC not supported");
			return -EOPNOTSUPP;
		}

		if (!ether_addr_equal(match.mask->dst, bcast)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on MAC not supported");
			return -EOPNOTSUPP;
		}

		dmac = ether_addr_to_u64(match.key->dst);
		is_bcast_dmac = ether_addr_equal(match.key->dst, bcast);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		if (match.mask->vlan_id &&
		    match.mask->vlan_id != VLAN_VID_MASK) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on VID is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_priority &&
		    match.mask->vlan_priority != 0x7) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching on PCP is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id)
			vid = match.key->vlan_id;
		if (match.mask->vlan_priority)
			pcp = match.key->vlan_priority;
	}

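	/* Classify the match into one of the key types supported by the
	 * switch.
	 */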
	if (is_bcast_dmac && vid == U16_MAX && pcp == U16_MAX) {
		key->type = SJA1105_KEY_BCAST;
		return 0;
	}
	if (dmac == U64_MAX && vid == U16_MAX && pcp != U16_MAX) {
		key->type = SJA1105_KEY_TC;
		key->tc.pcp = pcp;
		return 0;
	}
	if (dmac != U64_MAX && vid != U16_MAX && pcp != U16_MAX) {
		key->type = SJA1105_KEY_VLAN_AWARE_VL;
		key->vl.dmac = dmac;
		key->vl.vid = vid;
		key->vl.pcp = pcp;
		return 0;
	}
	if (dmac != U64_MAX) {
		key->type = SJA1105_KEY_VLAN_UNAWARE_VL;
		key->vl.dmac = dmac;
		return 0;
	}

	NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
	return -EOPNOTSUPP;
}

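/* Offload a tc-flower rule: parse the key, then program the hardware for each
 * action in the rule. Virtual-link rules defer the static config reload until
 * all actions have been processed.
 */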
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct sja1105_private *priv = ds->priv;
	const struct flow_action_entry *act;
	unsigned long cookie = cls->cookie;
	bool routing_rule = false;
	struct sja1105_key key;
	bool gate_rule = false;
	bool vl_rule = false;
	int rc, i;

	rc = sja1105_flower_parse_key(priv, extack, cls, &key);
	if (rc)
		return rc;

	rc = -EOPNOTSUPP;

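	/* Police actions program the L2 policing table; trap, redirect, drop
	 * and gate actions are offloaded as virtual-link (VL) rules.
	 */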
	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			rc = sja1105_flower_policer(priv, port, extack, cookie,
						    &key,
						    act->police.rate_bytes_ps,
						    act->police.burst);
			if (rc)
				goto out;
			break;
		case FLOW_ACTION_TRAP: {
			int cpu = dsa_upstream_port(ds, port);

			routing_rule = true;
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, BIT(cpu), true);
			if (rc)
				goto out;
			break;
		}
		case FLOW_ACTION_REDIRECT: {
			struct dsa_port *to_dp;

			to_dp = dsa_port_from_netdev(act->dev);
			if (IS_ERR(to_dp)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Destination not a switch port");
				return -EOPNOTSUPP;
			}

			routing_rule = true;
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, BIT(to_dp->index), true);
			if (rc)
				goto out;
			break;
		}
		case FLOW_ACTION_DROP:
			vl_rule = true;

			rc = sja1105_vl_redirect(priv, port, extack, cookie,
						 &key, 0, false);
			if (rc)
				goto out;
			break;
		case FLOW_ACTION_GATE:
			gate_rule = true;
			vl_rule = true;

			rc = sja1105_vl_gate(priv, port, extack, cookie,
					     &key, act->gate.index,
					     act->gate.prio,
					     act->gate.basetime,
					     act->gate.cycletime,
					     act->gate.cycletimeext,
					     act->gate.num_entries,
					     act->gate.entries);
			if (rc)
				goto out;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Action not supported");
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	if (vl_rule && !rc) {
		/* Delay scheduling configuration until DESTPORTS has been
		 * populated by all other actions.
		 */
		if (gate_rule) {
			if (!routing_rule) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Can only offload gate action together with redirect or trap");
				return -EOPNOTSUPP;
			}
			rc = sja1105_init_scheduling(priv);
			if (rc)
				goto out;
		}

		rc = sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
	}

out:
	return rc;
}

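/* Remove @port from an offloaded rule. Policing entries are pointed back at
 * the port's default policer, and the dedicated policer is released once no
 * port references the rule anymore. Virtual-link rules are handled by
 * sja1105_vl_delete().
 */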
int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
			   struct flow_cls_offload *cls, bool ingress)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
	struct sja1105_l2_policing_entry *policing;
	int old_sharindx;

	if (!rule)
		return 0;

	if (rule->type == SJA1105_RULE_VL)
		return sja1105_vl_delete(priv, port, rule, cls->common.extack);

	policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;

	if (rule->type == SJA1105_RULE_BCAST_POLICER) {
		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;

		old_sharindx = policing[bcast].sharindx;
		policing[bcast].sharindx = port;
	} else if (rule->type == SJA1105_RULE_TC_POLICER) {
		int index = (port * SJA1105_NUM_TC) + rule->key.tc.pcp;

		old_sharindx = policing[index].sharindx;
		policing[index].sharindx = port;
	} else {
		return -EINVAL;
	}

	rule->port_mask &= ~BIT(port);
	if (!rule->port_mask) {
		priv->flow_block.l2_policer_used[old_sharindx] = false;
		list_del(&rule->list);
		kfree(rule);
	}

	return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}

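/* Report hardware counters for an offloaded rule. Only virtual-link rules
 * have per-rule statistics; other rule types report nothing.
 */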
int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
			     struct flow_cls_offload *cls, bool ingress)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
	int rc;

	if (!rule)
		return 0;

	if (rule->type != SJA1105_RULE_VL)
		return 0;

	rc = sja1105_vl_stats(priv, port, rule, &cls->stats,
			      cls->common.extack);
	if (rc)
		return rc;

	return 0;
}

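/* The first SJA1105_NUM_PORTS policers back the default per-port policing
 * entries set up by the static config, so mark them as used and keep them
 * out of reach of tc-flower rules.
 */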
void sja1105_flower_setup(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	int port;

	INIT_LIST_HEAD(&priv->flow_block.rules);

	for (port = 0; port < SJA1105_NUM_PORTS; port++)
		priv->flow_block.l2_policer_used[port] = true;
}

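/* Free any rules still offloaded when the switch driver is torn down */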
void sja1105_flower_teardown(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_rule *rule;
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &priv->flow_block.rules) {
		rule = list_entry(pos, struct sja1105_rule, list);
		list_del(&rule->list);
		kfree(rule);
	}
}