// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch flower support
 *
 * Copyright 2021 NXP
 *
 */

#include "dpaa2-switch.h"

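/* Filters reach this code through the tc flower and matchall offload
 * hooks. A hypothetical example, assuming a switch port named ethsw0p0:
 *
 *   tc filter add dev ethsw0p0 ingress flower skip_sw \
 *           dst_mac 00:01:02:03:04:05 action drop
 */

/* Translate a flower filter's match keys into the match/mask pair of
 * struct dpsw_acl_fields that the DPSW ACL hardware expects. Keys the
 * hardware cannot match on are rejected with -EOPNOTSUPP.
 */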
static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
					 struct dpsw_acl_key *acl_key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpsw_acl_fields *acl_h, *acl_m;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	acl_h = &acl_key->match;
	acl_m = &acl_key->mask;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		acl_h->l3_protocol = match.key->ip_proto;
		acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto);
		acl_m->l3_protocol = match.mask->ip_proto;
		acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]);
		ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]);
		ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]);
		ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		acl_h->l2_vlan_id = match.key->vlan_id;
		acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid);
		acl_h->l2_pcp_dei = match.key->vlan_priority << 1 |
				    match.key->vlan_dei;

		acl_m->l2_vlan_id = match.mask->vlan_id;
		acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid);
		acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 |
				    match.mask->vlan_dei;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		acl_h->l3_source_ip = be32_to_cpu(match.key->src);
		acl_h->l3_dest_ip = be32_to_cpu(match.key->dst);
		acl_m->l3_source_ip = be32_to_cpu(match.mask->src);
		acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		acl_h->l4_source_port = be16_to_cpu(match.key->src);
		acl_h->l4_dest_port = be16_to_cpu(match.key->dst);
		acl_m->l4_source_port = be16_to_cpu(match.mask->src);
		acl_m->l4_dest_port = be16_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL not supported");
			return -EOPNOTSUPP;
		}

		if ((match.mask->tos & 0x3) != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on ECN not supported, only DSCP");
			return -EOPNOTSUPP;
		}

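		/* The tos byte carries DSCP in its upper six bits and ECN
		 * in the lower two, so shift to keep only the DSCP field.
		 */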
		acl_h->l3_dscp = match.key->tos >> 2;
		acl_m->l3_dscp = match.mask->tos >> 2;
	}

	return 0;
}

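/* Install one ACL entry in hardware. The key/mask pair is serialized
 * into a DMA-mapped command buffer that the MC firmware consumes while
 * processing dpsw_acl_add_entry().
 */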
int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct ethsw_core *ethsw = acl_tbl->ethsw;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 acl_tbl->id, acl_entry_cfg);

	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
	kfree(cmd_buff);

	if (err) {
		dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
		return err;
	}

	return 0;
}

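/* Remove one ACL entry from hardware, identified by the same serialized
 * key/mask pair that was used when it was added.
 */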
static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl,
					 struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct ethsw_core *ethsw = acl_tbl->ethsw;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    acl_tbl->id, acl_entry_cfg);

	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
	kfree(cmd_buff);

	if (err) {
		dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
		return err;
	}

	return 0;
}

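/* Insert the new filter into the software list of ACL entries, which is
 * kept sorted by ascending tc priority, and return the index at which
 * it was inserted.
 */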
static int
dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl,
				   struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	struct list_head *pos, *n;
	int index = 0;

	if (list_empty(&acl_tbl->entries)) {
		list_add(&entry->list, &acl_tbl->entries);
		return index;
	}

	list_for_each_safe(pos, n, &acl_tbl->entries) {
		tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
		if (entry->prio < tmp->prio)
			break;
		index++;
	}
	list_add(&entry->list, pos->prev);
	return index;
}

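/* Return the ACL entry at the given position in the software list, or
 * NULL when the index is out of range.
 */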
static struct dpaa2_switch_acl_entry*
dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl,
				    int index)
{
	struct dpaa2_switch_acl_entry *tmp;
	int i = 0;

	list_for_each_entry(tmp, &acl_tbl->entries, list) {
		if (i == index)
			return tmp;
		++i;
	}

	return NULL;
}

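/* Re-position an entry in the hardware table by removing it and adding
 * it back with the new precedence value.
 */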
static int
dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_acl_tbl *acl_tbl,
				      struct dpaa2_switch_acl_entry *entry,
				      int precedence)
{
	int err;

	err = dpaa2_switch_acl_entry_remove(acl_tbl, entry);
	if (err)
		return err;

	entry->cfg.precedence = precedence;
	return dpaa2_switch_acl_entry_add(acl_tbl, entry);
}

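/* Install a new filter in both the software list and the hardware table.
 * Entries occupy the highest-numbered precedence slots, with the
 * highest-priority (lowest tc prio) filter at the lowest precedence
 * value, so every entry ranked above the new one is shifted one slot
 * towards precedence 0 to open a slot at the right rank.
 */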
static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
					  struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	/* Add the new ACL entry to the linked list and get its index */
	index = dpaa2_switch_acl_entry_add_to_list(acl_tbl, entry);

	/* Move up in priority the ACL entries to make space
	 * for the new filter.
	 */
	precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - acl_tbl->num_rules - 1;
	for (i = 0; i < index; i++) {
		tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i);

		err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp,
							    precedence);
		if (err)
			return err;

		precedence++;
	}

	/* Add the new entry to hardware */
	entry->cfg.precedence = precedence;
	err = dpaa2_switch_acl_entry_add(acl_tbl, entry);
	acl_tbl->num_rules++;

	return err;
}

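/* Look up an ACL entry by the unique cookie that tc assigned to the
 * filter.
 */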
static struct dpaa2_switch_acl_entry *
dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl,
					  unsigned long cookie)
{
	struct dpaa2_switch_acl_entry *tmp, *n;

	list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

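/* Return the position in the software list of the entry with the same
 * cookie, or -ENOENT when no such entry exists.
 */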
static int
dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl,
				 struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp, *n;
	int index = 0;

	list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) {
		if (tmp->cookie == entry->cookie)
			return index;
		index++;
	}
	return -ENOENT;
}

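/* Tear down a filter: delete it from hardware and from the software
 * list, then shift the higher-ranked entries one precedence slot back
 * so the table stays packed.
 */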
static int
dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
				  struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	index = dpaa2_switch_acl_entry_get_index(acl_tbl, entry);

	/* Remove the ACL entry from hardware */
	err = dpaa2_switch_acl_entry_remove(acl_tbl, entry);
	if (err)
		return err;

	acl_tbl->num_rules--;

	/* Remove it from the software list as well */
	list_del(&entry->list);

	/* Move down in priority the entries over the deleted one */
	precedence = entry->cfg.precedence;
	for (i = index - 1; i >= 0; i--) {
		tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i);
		err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp,
							    precedence);
		if (err)
			return err;

		precedence--;
	}

	kfree(entry);

	return 0;
}

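/* Map a tc flow action onto the DPSW ACL result that implements it:
 * trap to the control interface, redirect to another DPAA2 switch port,
 * or drop.
 */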
static int dpaa2_switch_tc_parse_action(struct ethsw_core *ethsw,
					struct flow_action_entry *cls_act,
					struct dpsw_acl_result *dpsw_act,
					struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (cls_act->id) {
	case FLOW_ACTION_TRAP:
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
		break;
	case FLOW_ACTION_REDIRECT:
		if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Destination not a DPAA2 switch port");
			return -EOPNOTSUPP;
		}

		dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT;
		break;
	case FLOW_ACTION_DROP:
		dpsw_act->action = DPSW_ACL_ACTION_DROP;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Action not supported");
		err = -EOPNOTSUPP;
		goto out;
	}

out:
	return err;
}

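/* Offload a flower filter: parse the match keys and the single supported
 * action into an ACL entry and install it.
 */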
int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct ethsw_core *ethsw = acl_tbl->ethsw;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct flow_action_entry *act;
	int err;

	if (!flow_offload_has_one_action(&rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);
	if (err)
		goto free_acl_entry;

	act = &rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action(ethsw, act,
					   &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

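/* Delete the offloaded flower filter identified by the tc cookie, if it
 * exists.
 */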
int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
				    struct flow_cls_offload *cls)
{
	struct dpaa2_switch_acl_entry *entry;

	entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie);
	if (!entry)
		return 0;

	return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry);
}

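/* Offload a matchall filter. The ACL key is left zeroed, and a mask of
 * all zeroes should match every frame, so only the action is parsed.
 */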
int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct ethsw_core *ethsw = acl_tbl->ethsw;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct flow_action_entry *act;
	int err;

	if (!flow_offload_has_one_action(&cls->rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	act = &cls->rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action(ethsw, act,
					   &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

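/* Delete the offloaded matchall filter identified by the tc cookie, if
 * it exists.
 */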
int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
				      struct tc_cls_matchall_offload *cls)
{
	struct dpaa2_switch_acl_entry *entry;

	entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie);
	if (!entry)
		return 0;

	return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry);
}