// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch flower support
 *
 * Copyright 2021 NXP
 *
 */

#include "dpaa2-switch.h"

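/* Parse the flower classifier key into the ACL match/mask representation
 * used by the DPSW firmware. Only the L2/L3/L4 fields that the hardware
 * can match on are accepted; anything else is rejected with -EOPNOTSUPP.
 */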
static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
					 struct dpsw_acl_key *acl_key)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpsw_acl_fields *acl_h, *acl_m;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	acl_h = &acl_key->match;
	acl_m = &acl_key->mask;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		acl_h->l3_protocol = match.key->ip_proto;
		acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto);
		acl_m->l3_protocol = match.mask->ip_proto;
		acl_m->l2_ether_type = be16_to_cpu(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]);
		ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]);
		ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]);
		ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		acl_h->l2_vlan_id = match.key->vlan_id;
		acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid);
		acl_h->l2_pcp_dei = match.key->vlan_priority << 1 |
				    match.key->vlan_dei;

		acl_m->l2_vlan_id = match.mask->vlan_id;
		acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid);
		acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 |
				    match.mask->vlan_dei;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		acl_h->l3_source_ip = be32_to_cpu(match.key->src);
		acl_h->l3_dest_ip = be32_to_cpu(match.key->dst);
		acl_m->l3_source_ip = be32_to_cpu(match.mask->src);
		acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		acl_h->l4_source_port = be16_to_cpu(match.key->src);
		acl_h->l4_dest_port = be16_to_cpu(match.key->dst);
		acl_m->l4_source_port = be16_to_cpu(match.mask->src);
		acl_m->l4_dest_port = be16_to_cpu(match.mask->dst);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL not supported");
			return -EOPNOTSUPP;
		}

		if ((match.mask->tos & 0x3) != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on ECN not supported, only DSCP");
			return -EOPNOTSUPP;
		}

		acl_h->l3_dscp = match.key->tos >> 2;
		acl_m->l3_dscp = match.mask->tos >> 2;
	}

	return 0;
}

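/* Add an ACL entry to the hardware table. The ACL key is serialized into a
 * temporary buffer which stays DMA-mapped only for the duration of the
 * dpsw_acl_add_entry() firmware call and is freed afterwards.
 */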
int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct ethsw_core *ethsw = filter_block->ethsw;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 filter_block->acl_id, acl_entry_cfg);

	/* Unmap the full buffer size, not sizeof() of the pointer, and free
	 * the buffer on both the success and error paths.
	 */
	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
	kfree(cmd_buff);

	if (err) {
		dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
		return err;
	}

	return 0;
}

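/* Counterpart of dpaa2_switch_acl_entry_add(): serialize the ACL key into
 * a temporary DMA-mapped buffer and request the firmware to remove the
 * entry.
 */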
static int
dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_acl_entry *entry)
{
	struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
	struct dpsw_acl_key *acl_key = &entry->key;
	struct ethsw_core *ethsw = block->ethsw;
	struct device *dev = ethsw->dev;
	u8 *cmd_buff;
	int err;

	cmd_buff = kzalloc(DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, GFP_KERNEL);
	if (!cmd_buff)
		return -ENOMEM;

	dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff);

	acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff,
						 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE,
						 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
		dev_err(dev, "DMA mapping failed\n");
		kfree(cmd_buff);
		return -EFAULT;
	}

	err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    block->acl_id, acl_entry_cfg);

	/* As in dpaa2_switch_acl_entry_add(), unmap the full buffer size and
	 * free the buffer on all return paths.
	 */
	dma_unmap_single(dev, acl_entry_cfg->key_iova,
			 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
	kfree(cmd_buff);

	if (err) {
		dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
		return err;
	}

	return 0;
}

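/* Insert the new filter into the per-block list of ACL entries, which is
 * kept sorted by ascending tc priority, and return the index at which the
 * entry was added.
 */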
static int
dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block,
				   struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	struct list_head *pos, *n;
	int index = 0;

	if (list_empty(&block->acl_entries)) {
		list_add(&entry->list, &block->acl_entries);
		return index;
	}

	list_for_each_safe(pos, n, &block->acl_entries) {
		tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
		if (entry->prio < tmp->prio)
			break;
		index++;
	}
	list_add(&entry->list, pos->prev);
	return index;
}

static struct dpaa2_switch_acl_entry *
dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block,
				    int index)
{
	struct dpaa2_switch_acl_entry *tmp;
	int i = 0;

	list_for_each_entry(tmp, &block->acl_entries, list) {
		if (i == index)
			return tmp;
		++i;
	}

	return NULL;
}

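/* The precedence of an ACL entry is part of its hardware configuration,
 * so updating it is implemented as a remove followed by a re-add.
 */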
static int
dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block,
				      struct dpaa2_switch_acl_entry *entry,
				      int precedence)
{
	int err;

	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	entry->cfg.precedence = precedence;
	return dpaa2_switch_acl_entry_add(block, entry);
}

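/* Install a new ACL entry: the entries that end up in front of it in the
 * sorted list are bumped to numerically lower (higher priority) precedence
 * values so that the hardware lookup order keeps matching the list order.
 */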
static int
dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block,
			       struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	/* Add the new ACL entry to the linked list and get its index */
	index = dpaa2_switch_acl_entry_add_to_list(block, entry);

	/* Move up in priority the ACL entries that precede the new filter
	 * to make room for it.
	 */
	precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1;
	for (i = 0; i < index; i++) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);

		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence++;
	}

	/* Add the new entry to hardware */
	entry->cfg.precedence = precedence;
	err = dpaa2_switch_acl_entry_add(block, entry);
	block->num_acl_rules++;

	return err;
}

static struct dpaa2_switch_acl_entry *
dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					  unsigned long cookie)
{
	struct dpaa2_switch_acl_entry *tmp;

	list_for_each_entry(tmp, &block->acl_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

static int
dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index = 0;

	list_for_each_entry(tmp, &block->acl_entries, list) {
		if (tmp->cookie == entry->cookie)
			return index;
		index++;
	}
	return -ENOENT;
}

static struct dpaa2_switch_mirror_entry *
dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
					 unsigned long cookie)
{
	struct dpaa2_switch_mirror_entry *tmp;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}
	return NULL;
}

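/* Remove an ACL entry from hardware and from the list, then shift the
 * entries that preceded it into the precedence slots freed by the removal.
 */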
static int
dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block,
				  struct dpaa2_switch_acl_entry *entry)
{
	struct dpaa2_switch_acl_entry *tmp;
	int index, i, precedence, err;

	index = dpaa2_switch_acl_entry_get_index(block, entry);

	/* Remove the ACL entry from hardware */
	err = dpaa2_switch_acl_entry_remove(block, entry);
	if (err)
		return err;

	block->num_acl_rules--;

	/* Also remove it from the list */
	list_del(&entry->list);

	/* Move the entries that preceded the deleted one down in priority,
	 * into the precedence slots freed up by the removal.
	 */
	precedence = entry->cfg.precedence;
	for (i = index - 1; i >= 0; i--) {
		tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
		err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
							    precedence);
		if (err)
			return err;

		precedence--;
	}

	kfree(entry);

	return 0;
}

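/* Translate a flower/matchall action into the corresponding DPSW ACL
 * result. Only trap, redirect to another port of the same switch and drop
 * can be offloaded.
 */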
static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw,
					    struct flow_action_entry *cls_act,
					    struct dpsw_acl_result *dpsw_act,
					    struct netlink_ext_ack *extack)
{
	switch (cls_act->id) {
	case FLOW_ACTION_TRAP:
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
		break;
	case FLOW_ACTION_REDIRECT:
		if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Destination not a DPAA2 switch port");
			return -EOPNOTSUPP;
		}

		dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
		dpsw_act->action = DPSW_ACL_ACTION_REDIRECT;
		break;
	case FLOW_ACTION_DROP:
		dpsw_act->action = DPSW_ACL_ACTION_DROP;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Action not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

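/* Install a mirroring rule on all switch ports that share the filter
 * block. The mirror (reflection) destination is global per switch, so it
 * is configured here only when no other mirroring rule is using it yet.
 */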
static int
dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block,
			      struct dpaa2_switch_mirror_entry *entry,
			      u16 to, struct netlink_ext_ack *extack)
{
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	struct ethsw_port_priv *port_priv;
	unsigned long ports_added = 0;
	u16 vlan = entry->cfg.vlan_id;
	bool mirror_port_enabled;
	int err, port;

	/* Set up the mirroring port */
	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	if (!mirror_port_enabled) {
		err = dpsw_set_reflection_if(ethsw->mc_io, 0,
					     ethsw->dpsw_handle, to);
		if (err)
			return err;
		ethsw->mirror_port = to;
	}

	/* Set up the same egress mirroring configuration on all the switch
	 * ports that share the same filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) {
		port_priv = ethsw->ports[port];

		/* We cannot add a per VLAN mirroring rule if the VLAN in
		 * question is not installed on the switch port.
		 */
		if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    !(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) {
			NL_SET_ERR_MSG(extack,
				       "VLAN must be installed on the switch port");
			err = -EINVAL;
			goto err_remove_filters;
		}

		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port, &entry->cfg);
		if (err)
			goto err_remove_filters;

		ports_added |= BIT(port);
	}

	list_add(&entry->list, &block->mirror_entries);

	return 0;

err_remove_filters:
	for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) {
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, &entry->cfg);
	}

	if (!mirror_port_enabled)
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return err;
}

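/* Tear down a mirroring rule on all ports belonging to the filter block
 * and release the mirror port when the last mirroring rule goes away.
 */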
static int
dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block,
				 struct dpaa2_switch_mirror_entry *entry)
{
	struct dpsw_reflection_cfg *cfg = &entry->cfg;
	unsigned long block_ports = block->ports;
	struct ethsw_core *ethsw = block->ethsw;
	int port;

	/* Remove this mirroring configuration from all the ports belonging to
	 * the filter block.
	 */
	for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs)
		dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  port, cfg);

	/* Also remove it from the list of mirror filters */
	list_del(&entry->list);
	kfree(entry);

	/* If this was the last mirror filter, then unset the mirror port */
	if (list_empty(&block->mirror_entries))
		ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	return 0;
}

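/* Offload a flower classifier whose action is trap, redirect or drop as a
 * hardware ACL entry.
 */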
static int
dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key);
	if (err)
		goto free_acl_entry;

	act = &rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
						u16 *vlan)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct netlink_ext_ack *extack = cls->common.extack;
	int ret = -EOPNOTSUPP;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Mirroring is supported only per VLAN");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);

		if (match.mask->vlan_priority != 0 ||
		    match.mask->vlan_dei != 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only matching on VLAN ID supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id != 0xFFF) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Masked matching not supported");
			return -EOPNOTSUPP;
		}

		*vlan = (u16)match.key->vlan_id;
		ret = 0;
	}

	return ret;
}

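/* Offload a flower classifier whose action is mirred as a per-VLAN
 * ingress mirroring rule.
 */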
static int
dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block,
				       struct flow_cls_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	bool mirror_port_enabled;
	u16 if_id, vlan;
	int err;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Parse the key */
	err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan);
	if (err)
		return err;

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration.
	 */
	list_for_each_entry(tmp, &block->mirror_entries, list) {
		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
		    tmp->cfg.vlan_id == vlan) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN;
	mirror_entry->cfg.vlan_id = vlan;
	mirror_entry->cookie = cls->cookie;

	err = dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					    extack);
	if (err)
		kfree(mirror_entry);

	return err;
}

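/* Entry point for flower offload. As an illustration (port names are just
 * examples), the ACL and mirroring paths can be exercised with:
 *
 *   tc filter add dev swp0 ingress flower skip_sw \
 *	dst_mac 00:01:02:03:04:05 action drop
 *   tc filter add dev swp0 ingress protocol 802.1q flower skip_sw \
 *	vlan_id 100 action mirred egress mirror dev swp1
 */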
int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_flower_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_flower_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}

int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;

	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);

	return 0;
}

static int
dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_acl_entry *acl_entry;
	struct flow_action_entry *act;
	int err;

	if (dpaa2_switch_acl_tbl_is_full(block)) {
		NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
		return -ENOMEM;
	}

	acl_entry = kzalloc(sizeof(*acl_entry), GFP_KERNEL);
	if (!acl_entry)
		return -ENOMEM;

	act = &cls->rule->action.entries[0];
	err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
					       &acl_entry->cfg.result, extack);
	if (err)
		goto free_acl_entry;

	acl_entry->prio = cls->common.prio;
	acl_entry->cookie = cls->cookie;

	err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
	if (err)
		goto free_acl_entry;

	return 0;

free_acl_entry:
	kfree(acl_entry);

	return err;
}

static int
dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block,
					 struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct ethsw_core *ethsw = block->ethsw;
	struct dpaa2_switch_mirror_entry *tmp;
	struct flow_action_entry *cls_act;
	bool mirror_port_enabled;
	u16 if_id;
	int err;

	mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
	cls_act = &cls->rule->action.entries[0];

	/* Offload rules only when the destination is a DPAA2 switch port */
	if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Destination not a DPAA2 switch port");
		return -EOPNOTSUPP;
	}
	if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);

	/* We have a single mirror port but can configure egress mirroring on
	 * all the other switch ports. We need to allow mirroring rules only
	 * when the destination port is the same.
	 */
	if (mirror_port_enabled && ethsw->mirror_port != if_id) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Multiple mirror ports not supported");
		return -EBUSY;
	}

	/* Make sure that we don't already have a mirror rule with the same
	 * configuration. One matchall rule per block is the maximum.
	 */
	list_for_each_entry(tmp, &block->mirror_entries, list) {
		if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matchall mirror filter already installed");
			return -EBUSY;
		}
	}

	mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
	if (!mirror_entry)
		return -ENOMEM;

	mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL;
	mirror_entry->cookie = cls->cookie;

	err = dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
					    extack);
	if (err)
		kfree(mirror_entry);

	return err;
}

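/* Entry point for matchall offload, e.g. mirroring all ingress traffic of
 * a port (port names are just examples):
 *
 *   tc filter add dev swp0 ingress matchall skip_sw \
 *	action mirred egress mirror dev swp1
 */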
int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action_entry *act;

	if (!flow_offload_has_one_action(&cls->rule->action)) {
		NL_SET_ERR_MSG(extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	act = &cls->rule->action.entries[0];
	switch (act->id) {
	case FLOW_ACTION_REDIRECT:
	case FLOW_ACTION_TRAP:
	case FLOW_ACTION_DROP:
		return dpaa2_switch_cls_matchall_replace_acl(block, cls);
	case FLOW_ACTION_MIRRED:
		return dpaa2_switch_cls_matchall_replace_mirror(block, cls);
	default:
		NL_SET_ERR_MSG_MOD(extack, "Action not supported");
		return -EOPNOTSUPP;
	}
}

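/* Replay all mirroring rules of a filter block on a port that has just
 * joined the block, unwinding them again on the first error.
 */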
int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
				      struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_add_reflection(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_add;
	}

	return 0;

unwind_add:
	/* Unwind only the reflections that were actually installed */
	list_for_each_entry_continue_reverse(tmp, &block->mirror_entries,
					     list)
		dpsw_if_remove_reflection(ethsw->mc_io, 0,
					  ethsw->dpsw_handle,
					  port_priv->idx, &tmp->cfg);

	return err;
}

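/* Remove all mirroring rules of a filter block from a port that is
 * leaving the block, re-adding them on the first error.
 */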
int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
					struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_mirror_entry *tmp;
	int err;

	list_for_each_entry(tmp, &block->mirror_entries, list) {
		err = dpsw_if_remove_reflection(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						port_priv->idx, &tmp->cfg);
		if (err)
			goto unwind_remove;
	}

	return 0;

unwind_remove:
	/* Re-add only the reflections that were actually removed */
	list_for_each_entry_continue_reverse(tmp, &block->mirror_entries,
					     list)
		dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
				       port_priv->idx, &tmp->cfg);

	return err;
}

int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls)
{
	struct dpaa2_switch_mirror_entry *mirror_entry;
	struct dpaa2_switch_acl_entry *acl_entry;

	/* If this filter is an ACL one, remove it */
	acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
							      cls->cookie);
	if (acl_entry)
		return dpaa2_switch_acl_tbl_remove_entry(block,
							 acl_entry);

	/* If not, then it has to be a mirror */
	mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
								cls->cookie);
	if (mirror_entry)
		return dpaa2_switch_block_remove_mirror(block,
							mirror_entry);

	return 0;
}