// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/rhashtable.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/refcount.h>
#include <net/flow_offload.h>

#include "item.h"
#include "trap.h"
#include "core_acl_flex_actions.h"

enum mlxsw_afa_set_type {
	MLXSW_AFA_SET_TYPE_NEXT,
	MLXSW_AFA_SET_TYPE_GOTO,
};

/* afa_set_type
 * Type of the record at the end of the action set.
 */
MLXSW_ITEM32(afa, set, type, 0xA0, 28, 4);

/* afa_set_next_action_set_ptr
 * A pointer to the next action set in the KVD Centralized database.
 */
MLXSW_ITEM32(afa, set, next_action_set_ptr, 0xA4, 0, 24);

/* afa_set_goto_g
 * group - When set, the binding is of an ACL group. When cleared,
 * the binding is of an ACL.
 * Must be set to 1 for Spectrum.
 */
MLXSW_ITEM32(afa, set, goto_g, 0xA4, 29, 1);

enum mlxsw_afa_set_goto_binding_cmd {
	/* continue to the next binding point */
	MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE,
	/* jump to the next binding point, no return */
	MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP,
	/* terminate the ACL binding */
	MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM = 4,
};

/* afa_set_goto_binding_cmd */
MLXSW_ITEM32(afa, set, goto_binding_cmd, 0xA4, 24, 3);

/* afa_set_goto_next_binding
 * ACL/ACL group identifier. If the g bit is set, this field should hold
 * the acl_group_id, else it should hold the acl_id.
 */
MLXSW_ITEM32(afa, set, goto_next_binding, 0xA4, 0, 16);

/* afa_all_action_type
 * Action Type.
 */
MLXSW_ITEM32(afa, all, action_type, 0x00, 24, 6);

struct mlxsw_afa {
	unsigned int max_acts_per_set;
	const struct mlxsw_afa_ops *ops;
	void *ops_priv;
	struct rhashtable set_ht;
	struct rhashtable fwd_entry_ht;
	struct rhashtable cookie_ht;
	struct idr cookie_idr;
};

#define MLXSW_AFA_SET_LEN 0xA8

struct mlxsw_afa_set_ht_key {
	char enc_actions[MLXSW_AFA_SET_LEN]; /* Encoded set */
	bool is_first;
};

/* Set structure holds one action set record. It contains up to three
 * actions (depending on the size of the particular actions). The set is
 * either put directly into a rule, or it is stored in the KVD linear area.
 * To prevent duplicate entries in the KVD linear area, a hashtable is
 * used to track sets that were previously inserted and may be shared.
 */

struct mlxsw_afa_set {
	struct rhash_head ht_node;
	struct mlxsw_afa_set_ht_key ht_key;
	u32 kvdl_index;
	bool shared; /* Inserted in hashtable (doesn't mean that
		      * kvdl_index is valid).
		      */
	unsigned int ref_count;
	struct mlxsw_afa_set *next; /* Pointer to the next set. */
	struct mlxsw_afa_set *prev; /* Pointer to the previous set.
				     * Note that a set may have multiple
				     * sets from multiple blocks
				     * pointing at it. This is only
				     * usable until commit.
				     */
};

static const struct rhashtable_params mlxsw_afa_set_ht_params = {
	.key_len = sizeof(struct mlxsw_afa_set_ht_key),
	.key_offset = offsetof(struct mlxsw_afa_set, ht_key),
	.head_offset = offsetof(struct mlxsw_afa_set, ht_node),
	.automatic_shrinking = true,
};

struct mlxsw_afa_fwd_entry_ht_key {
	u8 local_port;
};

struct mlxsw_afa_fwd_entry {
	struct rhash_head ht_node;
	struct mlxsw_afa_fwd_entry_ht_key ht_key;
	u32 kvdl_index;
	unsigned int ref_count;
};

static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
	.key_len = sizeof(struct mlxsw_afa_fwd_entry_ht_key),
	.key_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_key),
	.head_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_node),
	.automatic_shrinking = true,
};

struct mlxsw_afa_cookie {
	struct rhash_head ht_node;
	refcount_t ref_count;
	struct rcu_head rcu;
	u32 cookie_index;
	struct flow_action_cookie fa_cookie;
};

static u32 mlxsw_afa_cookie_hash(const struct flow_action_cookie *fa_cookie,
				 u32 seed)
{
	return jhash2((u32 *) fa_cookie->cookie,
		      fa_cookie->cookie_len / sizeof(u32), seed);
}

static u32 mlxsw_afa_cookie_key_hashfn(const void *data, u32 len, u32 seed)
{
	const struct flow_action_cookie *fa_cookie = data;

	return mlxsw_afa_cookie_hash(fa_cookie, seed);
}

static u32 mlxsw_afa_cookie_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct mlxsw_afa_cookie *cookie = data;

	return mlxsw_afa_cookie_hash(&cookie->fa_cookie, seed);
}

static int mlxsw_afa_cookie_obj_cmpfn(struct rhashtable_compare_arg *arg,
				      const void *obj)
{
	const struct flow_action_cookie *fa_cookie = arg->key;
	const struct mlxsw_afa_cookie *cookie = obj;

	if (cookie->fa_cookie.cookie_len == fa_cookie->cookie_len)
		return memcmp(cookie->fa_cookie.cookie, fa_cookie->cookie,
			      fa_cookie->cookie_len);
	return 1;
}

static const struct rhashtable_params mlxsw_afa_cookie_ht_params = {
	.head_offset = offsetof(struct mlxsw_afa_cookie, ht_node),
	.hashfn	= mlxsw_afa_cookie_key_hashfn,
	.obj_hashfn = mlxsw_afa_cookie_obj_hashfn,
	.obj_cmpfn = mlxsw_afa_cookie_obj_cmpfn,
	.automatic_shrinking = true,
};

struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
				   const struct mlxsw_afa_ops *ops,
				   void *ops_priv)
{
	struct mlxsw_afa *mlxsw_afa;
	int err;

	mlxsw_afa = kzalloc(sizeof(*mlxsw_afa), GFP_KERNEL);
	if (!mlxsw_afa)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&mlxsw_afa->set_ht, &mlxsw_afa_set_ht_params);
	if (err)
		goto err_set_rhashtable_init;
	err = rhashtable_init(&mlxsw_afa->fwd_entry_ht,
			      &mlxsw_afa_fwd_entry_ht_params);
	if (err)
		goto err_fwd_entry_rhashtable_init;
	err = rhashtable_init(&mlxsw_afa->cookie_ht,
			      &mlxsw_afa_cookie_ht_params);
	if (err)
		goto err_cookie_rhashtable_init;
	idr_init(&mlxsw_afa->cookie_idr);
	mlxsw_afa->max_acts_per_set = max_acts_per_set;
	mlxsw_afa->ops = ops;
	mlxsw_afa->ops_priv = ops_priv;
	return mlxsw_afa;

err_cookie_rhashtable_init:
	rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
err_fwd_entry_rhashtable_init:
	rhashtable_destroy(&mlxsw_afa->set_ht);
err_set_rhashtable_init:
	kfree(mlxsw_afa);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(mlxsw_afa_create);

void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa)
{
	WARN_ON(!idr_is_empty(&mlxsw_afa->cookie_idr));
	idr_destroy(&mlxsw_afa->cookie_idr);
	rhashtable_destroy(&mlxsw_afa->cookie_ht);
	rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
	rhashtable_destroy(&mlxsw_afa->set_ht);
	kfree(mlxsw_afa);
}
EXPORT_SYMBOL(mlxsw_afa_destroy);
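
/* Illustrative usage sketch (not part of this file): a driver is expected
 * to instantiate one mlxsw_afa instance at init time, supplying its own
 * KVD linear/mirror/counter callbacks through the ops structure, and tear
 * it down at fini. The names max_acts_per_set, my_afa_ops and priv below
 * are placeholders, not definitions made here:
 *
 *	afa = mlxsw_afa_create(max_acts_per_set, &my_afa_ops, priv);
 *	if (IS_ERR(afa))
 *		return PTR_ERR(afa);
 *	...
 *	mlxsw_afa_destroy(afa);
 */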

static void mlxsw_afa_set_goto_set(struct mlxsw_afa_set *set,
				   enum mlxsw_afa_set_goto_binding_cmd cmd,
				   u16 group_id)
{
	char *actions = set->ht_key.enc_actions;

	mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_GOTO);
	mlxsw_afa_set_goto_g_set(actions, true);
	mlxsw_afa_set_goto_binding_cmd_set(actions, cmd);
	mlxsw_afa_set_goto_next_binding_set(actions, group_id);
}

static void mlxsw_afa_set_next_set(struct mlxsw_afa_set *set,
				   u32 next_set_kvdl_index)
{
	char *actions = set->ht_key.enc_actions;

	mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_NEXT);
	mlxsw_afa_set_next_action_set_ptr_set(actions, next_set_kvdl_index);
}

static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
{
	struct mlxsw_afa_set *set;

	set = kzalloc(sizeof(*set), GFP_KERNEL);
	if (!set)
		return NULL;
	/* Need to initialize the set to pass by default */
	mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
	set->ht_key.is_first = is_first;
	set->ref_count = 1;
	return set;
}

static void mlxsw_afa_set_destroy(struct mlxsw_afa_set *set)
{
	kfree(set);
}

static int mlxsw_afa_set_share(struct mlxsw_afa *mlxsw_afa,
			       struct mlxsw_afa_set *set)
{
	int err;

	err = rhashtable_insert_fast(&mlxsw_afa->set_ht, &set->ht_node,
				     mlxsw_afa_set_ht_params);
	if (err)
		return err;
	err = mlxsw_afa->ops->kvdl_set_add(mlxsw_afa->ops_priv,
					   &set->kvdl_index,
					   set->ht_key.enc_actions,
					   set->ht_key.is_first);
	if (err)
		goto err_kvdl_set_add;
	set->shared = true;
	set->prev = NULL;
	return 0;

err_kvdl_set_add:
	rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
			       mlxsw_afa_set_ht_params);
	return err;
}

static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
				  struct mlxsw_afa_set *set)
{
	mlxsw_afa->ops->kvdl_set_del(mlxsw_afa->ops_priv,
				     set->kvdl_index,
				     set->ht_key.is_first);
	rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
			       mlxsw_afa_set_ht_params);
	set->shared = false;
}

static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
			      struct mlxsw_afa_set *set)
{
	if (--set->ref_count)
		return;
	if (set->shared)
		mlxsw_afa_set_unshare(mlxsw_afa, set);
	mlxsw_afa_set_destroy(set);
}

static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
					       struct mlxsw_afa_set *orig_set)
{
	struct mlxsw_afa_set *set;
	int err;

	/* A hashtable of sets is maintained. If a set with the exact
	 * same encoding already exists, we reuse it. Otherwise, the current
	 * set is shared by making it available to others via the hashtable.
	 */
	set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
				     mlxsw_afa_set_ht_params);
	if (set) {
		set->ref_count++;
		mlxsw_afa_set_put(mlxsw_afa, orig_set);
	} else {
		set = orig_set;
		err = mlxsw_afa_set_share(mlxsw_afa, set);
		if (err)
			return ERR_PTR(err);
	}
	return set;
}
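
/* Illustrative flow (hypothetical caller, not part of this file): when two
 * blocks commit action chains whose final sets have identical encodings,
 * the second mlxsw_afa_set_get() call finds the first set in set_ht and
 * both end up referencing a single KVD linear entry:
 *
 *	set_a = mlxsw_afa_set_get(afa, orig_a);	// inserts and shares orig_a
 *	set_b = mlxsw_afa_set_get(afa, orig_b);	// same encoding: returns
 *						// set_a, puts orig_b
 */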

/* Block structure holds a list of action sets. One action block
 * represents one chain of actions executed upon match of a rule.
 */

struct mlxsw_afa_block {
	struct mlxsw_afa *afa;
	bool finished;
	struct mlxsw_afa_set *first_set;
	struct mlxsw_afa_set *cur_set;
	unsigned int cur_act_index; /* In current set. */
	struct list_head resource_list; /* List of resources held by actions
					 * in this block.
					 */
};

struct mlxsw_afa_resource {
	struct list_head list;
	void (*destructor)(struct mlxsw_afa_block *block,
			   struct mlxsw_afa_resource *resource);
};

static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
				   struct mlxsw_afa_resource *resource)
{
	list_add(&resource->list, &block->resource_list);
}

static void mlxsw_afa_resource_del(struct mlxsw_afa_resource *resource)
{
	list_del(&resource->list);
}

static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
{
	struct mlxsw_afa_resource *resource, *tmp;

	list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
		resource->destructor(block, resource);
	}
}

struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
{
	struct mlxsw_afa_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&block->resource_list);
	block->afa = mlxsw_afa;

	/* At least one action set is always present, so just create it here */
	block->first_set = mlxsw_afa_set_create(true);
	if (!block->first_set)
		goto err_first_set_create;

	/* In case the user instructs us to have a dummy first set, we leave
	 * it empty here and create another, real, set right away.
	 */
	if (mlxsw_afa->ops->dummy_first_set) {
		block->cur_set = mlxsw_afa_set_create(false);
		if (!block->cur_set)
			goto err_second_set_create;
		block->cur_set->prev = block->first_set;
		block->first_set->next = block->cur_set;
	} else {
		block->cur_set = block->first_set;
	}

	return block;

err_second_set_create:
	mlxsw_afa_set_destroy(block->first_set);
err_first_set_create:
	kfree(block);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(mlxsw_afa_block_create);

void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
{
	struct mlxsw_afa_set *set = block->first_set;
	struct mlxsw_afa_set *next_set;

	do {
		next_set = set->next;
		mlxsw_afa_set_put(block->afa, set);
		set = next_set;
	} while (set);
	mlxsw_afa_resources_destroy(block);
	kfree(block);
}
EXPORT_SYMBOL(mlxsw_afa_block_destroy);

int mlxsw_afa_block_commit(struct mlxsw_afa_block *block)
{
	struct mlxsw_afa_set *set = block->cur_set;
	struct mlxsw_afa_set *prev_set;

	block->cur_set = NULL;
	block->finished = true;

	/* Go over all linked sets starting from the last one
	 * and try to find an existing set in the hash table.
	 * In case it is not there, assign a KVD linear index
	 * and insert it.
	 */
	do {
		prev_set = set->prev;
		set = mlxsw_afa_set_get(block->afa, set);
		if (IS_ERR(set))
			/* No rollback is needed since the chain is
			 * in a consistent state and mlxsw_afa_block_destroy
			 * will take care of putting it away.
			 */
			return PTR_ERR(set);
		if (prev_set) {
			prev_set->next = set;
			mlxsw_afa_set_next_set(prev_set, set->kvdl_index);
			set = prev_set;
		}
	} while (prev_set);

	block->first_set = set;
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_commit);
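
/* Illustrative lifecycle sketch (hypothetical caller, not part of this
 * file): a rule typically builds a block, appends actions, commits it and
 * then programs the encoded first set into the rule entry. Error handling
 * is elided here for brevity:
 *
 *	block = mlxsw_afa_block_create(afa);
 *	mlxsw_afa_block_append_counter(block, &counter_index, extack);
 *	mlxsw_afa_block_append_drop(block, true, NULL, extack);
 *	mlxsw_afa_block_commit(block);
 *	enc_actions = mlxsw_afa_block_first_set(block);
 *	... program enc_actions into the rule ...
 *	mlxsw_afa_block_destroy(block);
 */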

char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
{
	return block->first_set->ht_key.enc_actions;
}
EXPORT_SYMBOL(mlxsw_afa_block_first_set);

char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block)
{
	return block->cur_set->ht_key.enc_actions;
}
EXPORT_SYMBOL(mlxsw_afa_block_cur_set);

u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block)
{
	/* First set is never in KVD linear. So the first set
	 * with valid KVD linear index is always the second one.
	 */
	if (WARN_ON(!block->first_set->next))
		return 0;
	return block->first_set->next->kvdl_index;
}
EXPORT_SYMBOL(mlxsw_afa_block_first_kvdl_index);

int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity)
{
	u32 kvdl_index = mlxsw_afa_block_first_kvdl_index(block);

	return block->afa->ops->kvdl_set_activity_get(block->afa->ops_priv,
						      kvdl_index, activity);
}
EXPORT_SYMBOL(mlxsw_afa_block_activity_get);

int mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
{
	if (block->finished)
		return -EINVAL;
	mlxsw_afa_set_goto_set(block->cur_set,
			       MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0);
	block->finished = true;
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_continue);

int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
{
	if (block->finished)
		return -EINVAL;
	mlxsw_afa_set_goto_set(block->cur_set,
			       MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id);
	block->finished = true;
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_jump);

int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block)
{
	if (block->finished)
		return -EINVAL;
	mlxsw_afa_set_goto_set(block->cur_set,
			       MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
	block->finished = true;
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_terminate);

static struct mlxsw_afa_fwd_entry *
mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
{
	struct mlxsw_afa_fwd_entry *fwd_entry;
	int err;

	fwd_entry = kzalloc(sizeof(*fwd_entry), GFP_KERNEL);
	if (!fwd_entry)
		return ERR_PTR(-ENOMEM);
	fwd_entry->ht_key.local_port = local_port;
	fwd_entry->ref_count = 1;

	err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
				     &fwd_entry->ht_node,
				     mlxsw_afa_fwd_entry_ht_params);
	if (err)
		goto err_rhashtable_insert;

	err = mlxsw_afa->ops->kvdl_fwd_entry_add(mlxsw_afa->ops_priv,
						 &fwd_entry->kvdl_index,
						 local_port);
	if (err)
		goto err_kvdl_fwd_entry_add;
	return fwd_entry;

err_kvdl_fwd_entry_add:
	rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
			       mlxsw_afa_fwd_entry_ht_params);
err_rhashtable_insert:
	kfree(fwd_entry);
	return ERR_PTR(err);
}

static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa,
					struct mlxsw_afa_fwd_entry *fwd_entry)
{
	mlxsw_afa->ops->kvdl_fwd_entry_del(mlxsw_afa->ops_priv,
					   fwd_entry->kvdl_index);
	rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
			       mlxsw_afa_fwd_entry_ht_params);
	kfree(fwd_entry);
}

static struct mlxsw_afa_fwd_entry *
mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port)
{
	struct mlxsw_afa_fwd_entry_ht_key ht_key = {0};
	struct mlxsw_afa_fwd_entry *fwd_entry;

	ht_key.local_port = local_port;
	fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
					   mlxsw_afa_fwd_entry_ht_params);
	if (fwd_entry) {
		fwd_entry->ref_count++;
		return fwd_entry;
	}
	return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
}

static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
				    struct mlxsw_afa_fwd_entry *fwd_entry)
{
	if (--fwd_entry->ref_count)
		return;
	mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
}

struct mlxsw_afa_fwd_entry_ref {
	struct mlxsw_afa_resource resource;
	struct mlxsw_afa_fwd_entry *fwd_entry;
};

static void
mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
				struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
{
	mlxsw_afa_resource_del(&fwd_entry_ref->resource);
	mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
	kfree(fwd_entry_ref);
}

static void
mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block,
				   struct mlxsw_afa_resource *resource)
{
	struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;

	fwd_entry_ref = container_of(resource, struct mlxsw_afa_fwd_entry_ref,
				     resource);
	mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
}

static struct mlxsw_afa_fwd_entry_ref *
mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
{
	struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
	struct mlxsw_afa_fwd_entry *fwd_entry;
	int err;

	fwd_entry_ref = kzalloc(sizeof(*fwd_entry_ref), GFP_KERNEL);
	if (!fwd_entry_ref)
		return ERR_PTR(-ENOMEM);
	fwd_entry = mlxsw_afa_fwd_entry_get(block->afa, local_port);
	if (IS_ERR(fwd_entry)) {
		err = PTR_ERR(fwd_entry);
		goto err_fwd_entry_get;
	}
	fwd_entry_ref->fwd_entry = fwd_entry;
	fwd_entry_ref->resource.destructor = mlxsw_afa_fwd_entry_ref_destructor;
	mlxsw_afa_resource_add(block, &fwd_entry_ref->resource);
	return fwd_entry_ref;

err_fwd_entry_get:
	kfree(fwd_entry_ref);
	return ERR_PTR(err);
}

struct mlxsw_afa_counter {
	struct mlxsw_afa_resource resource;
	u32 counter_index;
};

static void
mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
			  struct mlxsw_afa_counter *counter)
{
	mlxsw_afa_resource_del(&counter->resource);
	block->afa->ops->counter_index_put(block->afa->ops_priv,
					   counter->counter_index);
	kfree(counter);
}

static void
mlxsw_afa_counter_destructor(struct mlxsw_afa_block *block,
			     struct mlxsw_afa_resource *resource)
{
	struct mlxsw_afa_counter *counter;

	counter = container_of(resource, struct mlxsw_afa_counter, resource);
	mlxsw_afa_counter_destroy(block, counter);
}

static struct mlxsw_afa_counter *
mlxsw_afa_counter_create(struct mlxsw_afa_block *block)
{
	struct mlxsw_afa_counter *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	err = block->afa->ops->counter_index_get(block->afa->ops_priv,
						 &counter->counter_index);
	if (err)
		goto err_counter_index_get;
	counter->resource.destructor = mlxsw_afa_counter_destructor;
	mlxsw_afa_resource_add(block, &counter->resource);
	return counter;

err_counter_index_get:
	kfree(counter);
	return ERR_PTR(err);
}

/* 20 bits is the maximum that the hardware can handle in the trap with
 * userdef action and carry along with the trapped packet.
 */
#define MLXSW_AFA_COOKIE_INDEX_BITS 20
#define MLXSW_AFA_COOKIE_INDEX_MAX ((1 << MLXSW_AFA_COOKIE_INDEX_BITS) - 1)

static struct mlxsw_afa_cookie *
mlxsw_afa_cookie_create(struct mlxsw_afa *mlxsw_afa,
			const struct flow_action_cookie *fa_cookie)
{
	struct mlxsw_afa_cookie *cookie;
	u32 cookie_index;
	int err;

	cookie = kzalloc(sizeof(*cookie) + fa_cookie->cookie_len, GFP_KERNEL);
	if (!cookie)
		return ERR_PTR(-ENOMEM);
	refcount_set(&cookie->ref_count, 1);
	memcpy(&cookie->fa_cookie, fa_cookie,
	       sizeof(*fa_cookie) + fa_cookie->cookie_len);

	err = rhashtable_insert_fast(&mlxsw_afa->cookie_ht, &cookie->ht_node,
				     mlxsw_afa_cookie_ht_params);
	if (err)
		goto err_rhashtable_insert;

	/* Start cookie indexes at 1. Leave the 0 index unused. Packets
	 * that come from the HW and were not dropped by a drop-with-cookie
	 * action are going to pass cookie_index 0 to the lookup.
	 */
	cookie_index = 1;
	err = idr_alloc_u32(&mlxsw_afa->cookie_idr, cookie, &cookie_index,
			    MLXSW_AFA_COOKIE_INDEX_MAX, GFP_KERNEL);
	if (err)
		goto err_idr_alloc;
	cookie->cookie_index = cookie_index;
	return cookie;

err_idr_alloc:
	rhashtable_remove_fast(&mlxsw_afa->cookie_ht, &cookie->ht_node,
			       mlxsw_afa_cookie_ht_params);
err_rhashtable_insert:
	kfree(cookie);
	return ERR_PTR(err);
}

static void mlxsw_afa_cookie_destroy(struct mlxsw_afa *mlxsw_afa,
				     struct mlxsw_afa_cookie *cookie)
{
	idr_remove(&mlxsw_afa->cookie_idr, cookie->cookie_index);
	rhashtable_remove_fast(&mlxsw_afa->cookie_ht, &cookie->ht_node,
			       mlxsw_afa_cookie_ht_params);
	kfree_rcu(cookie, rcu);
}

static struct mlxsw_afa_cookie *
mlxsw_afa_cookie_get(struct mlxsw_afa *mlxsw_afa,
		     const struct flow_action_cookie *fa_cookie)
{
	struct mlxsw_afa_cookie *cookie;

	cookie = rhashtable_lookup_fast(&mlxsw_afa->cookie_ht, fa_cookie,
					mlxsw_afa_cookie_ht_params);
	if (cookie) {
		refcount_inc(&cookie->ref_count);
		return cookie;
	}
	return mlxsw_afa_cookie_create(mlxsw_afa, fa_cookie);
}

static void mlxsw_afa_cookie_put(struct mlxsw_afa *mlxsw_afa,
				 struct mlxsw_afa_cookie *cookie)
{
	if (!refcount_dec_and_test(&cookie->ref_count))
		return;
	mlxsw_afa_cookie_destroy(mlxsw_afa, cookie);
}

/* RCU read lock must be held */
const struct flow_action_cookie *
mlxsw_afa_cookie_lookup(struct mlxsw_afa *mlxsw_afa, u32 cookie_index)
{
	struct mlxsw_afa_cookie *cookie;

	/* 0 index means no cookie */
	if (!cookie_index)
		return NULL;
	cookie = idr_find(&mlxsw_afa->cookie_idr, cookie_index);
	if (!cookie)
		return NULL;
	return &cookie->fa_cookie;
}
EXPORT_SYMBOL(mlxsw_afa_cookie_lookup);
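
/* Illustrative usage sketch (hypothetical trap handler, not part of this
 * file): the lookup must run under rcu_read_lock(), since a concurrent
 * mlxsw_afa_cookie_destroy() frees the cookie via kfree_rcu():
 *
 *	rcu_read_lock();
 *	fa_cookie = mlxsw_afa_cookie_lookup(afa, cookie_index);
 *	if (fa_cookie)
 *		... report fa_cookie along with the dropped packet ...
 *	rcu_read_unlock();
 */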

struct mlxsw_afa_cookie_ref {
	struct mlxsw_afa_resource resource;
	struct mlxsw_afa_cookie *cookie;
};

static void
mlxsw_afa_cookie_ref_destroy(struct mlxsw_afa_block *block,
			     struct mlxsw_afa_cookie_ref *cookie_ref)
{
	mlxsw_afa_resource_del(&cookie_ref->resource);
	mlxsw_afa_cookie_put(block->afa, cookie_ref->cookie);
	kfree(cookie_ref);
}

static void
mlxsw_afa_cookie_ref_destructor(struct mlxsw_afa_block *block,
				struct mlxsw_afa_resource *resource)
{
	struct mlxsw_afa_cookie_ref *cookie_ref;

	cookie_ref = container_of(resource, struct mlxsw_afa_cookie_ref,
				  resource);
	mlxsw_afa_cookie_ref_destroy(block, cookie_ref);
}

static struct mlxsw_afa_cookie_ref *
mlxsw_afa_cookie_ref_create(struct mlxsw_afa_block *block,
			    const struct flow_action_cookie *fa_cookie)
{
	struct mlxsw_afa_cookie_ref *cookie_ref;
	struct mlxsw_afa_cookie *cookie;
	int err;

	cookie_ref = kzalloc(sizeof(*cookie_ref), GFP_KERNEL);
	if (!cookie_ref)
		return ERR_PTR(-ENOMEM);
	cookie = mlxsw_afa_cookie_get(block->afa, fa_cookie);
	if (IS_ERR(cookie)) {
		err = PTR_ERR(cookie);
		goto err_cookie_get;
	}
	cookie_ref->cookie = cookie;
	cookie_ref->resource.destructor = mlxsw_afa_cookie_ref_destructor;
	mlxsw_afa_resource_add(block, &cookie_ref->resource);
	return cookie_ref;

err_cookie_get:
	kfree(cookie_ref);
	return ERR_PTR(err);
}

#define MLXSW_AFA_ONE_ACTION_LEN 32
#define MLXSW_AFA_PAYLOAD_OFFSET 4

static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
					   u8 action_code, u8 action_size)
{
	char *oneact;
	char *actions;

	if (block->finished)
		return ERR_PTR(-EINVAL);
	if (block->cur_act_index + action_size >
	    block->afa->max_acts_per_set) {
		struct mlxsw_afa_set *set;

		/* The appended action won't fit into the current action set,
		 * so create a new set.
		 */
		set = mlxsw_afa_set_create(false);
		if (!set)
			return ERR_PTR(-ENOBUFS);
		set->prev = block->cur_set;
		block->cur_act_index = 0;
		block->cur_set->next = set;
		block->cur_set = set;
	}

	actions = block->cur_set->ht_key.enc_actions;
	oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN;
	block->cur_act_index += action_size;
	mlxsw_afa_all_action_type_set(oneact, action_code);
	return oneact + MLXSW_AFA_PAYLOAD_OFFSET;
}
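
/* A worked example of the layout (derived from the defines above, purely
 * illustrative): with cur_act_index == 2, the next action record starts at
 * byte 2 * MLXSW_AFA_ONE_ACTION_LEN == 64 of enc_actions, its action type
 * is encoded there and the returned payload pointer is at byte
 * 64 + MLXSW_AFA_PAYLOAD_OFFSET == 68. The set terminator record
 * (afa_set_type and friends) always lives at offset 0xA0 of the
 * MLXSW_AFA_SET_LEN-byte set.
 */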

/* VLAN Action
 * -----------
 * VLAN action is used for manipulating VLANs. It can be used to implement
 * QinQ, VLAN translation, change of PCP bits of the VLAN tag, push, pop or
 * swap VLANs and more.
 */

#define MLXSW_AFA_VLAN_CODE 0x02
#define MLXSW_AFA_VLAN_SIZE 1

enum mlxsw_afa_vlan_vlan_tag_cmd {
	MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
	MLXSW_AFA_VLAN_VLAN_TAG_CMD_PUSH_TAG,
	MLXSW_AFA_VLAN_VLAN_TAG_CMD_POP_TAG,
};

enum mlxsw_afa_vlan_cmd {
	MLXSW_AFA_VLAN_CMD_NOP,
	MLXSW_AFA_VLAN_CMD_SET_OUTER,
	MLXSW_AFA_VLAN_CMD_SET_INNER,
	MLXSW_AFA_VLAN_CMD_COPY_OUTER_TO_INNER,
	MLXSW_AFA_VLAN_CMD_COPY_INNER_TO_OUTER,
	MLXSW_AFA_VLAN_CMD_SWAP,
};

/* afa_vlan_vlan_tag_cmd
 * Tag command: push, pop, nop VLAN header.
 */
MLXSW_ITEM32(afa, vlan, vlan_tag_cmd, 0x00, 29, 3);

/* afa_vlan_vid_cmd */
MLXSW_ITEM32(afa, vlan, vid_cmd, 0x04, 29, 3);

/* afa_vlan_vid */
MLXSW_ITEM32(afa, vlan, vid, 0x04, 0, 12);

/* afa_vlan_ethertype_cmd */
MLXSW_ITEM32(afa, vlan, ethertype_cmd, 0x08, 29, 3);

/* afa_vlan_ethertype
 * Index to EtherTypes in Switch VLAN EtherType Register (SVER).
 */
MLXSW_ITEM32(afa, vlan, ethertype, 0x08, 24, 3);

/* afa_vlan_pcp_cmd */
MLXSW_ITEM32(afa, vlan, pcp_cmd, 0x08, 13, 3);

/* afa_vlan_pcp */
MLXSW_ITEM32(afa, vlan, pcp, 0x08, 8, 3);

static inline void
mlxsw_afa_vlan_pack(char *payload,
		    enum mlxsw_afa_vlan_vlan_tag_cmd vlan_tag_cmd,
		    enum mlxsw_afa_vlan_cmd vid_cmd, u16 vid,
		    enum mlxsw_afa_vlan_cmd pcp_cmd, u8 pcp,
		    enum mlxsw_afa_vlan_cmd ethertype_cmd, u8 ethertype)
{
	mlxsw_afa_vlan_vlan_tag_cmd_set(payload, vlan_tag_cmd);
	mlxsw_afa_vlan_vid_cmd_set(payload, vid_cmd);
	mlxsw_afa_vlan_vid_set(payload, vid);
	mlxsw_afa_vlan_pcp_cmd_set(payload, pcp_cmd);
	mlxsw_afa_vlan_pcp_set(payload, pcp);
	mlxsw_afa_vlan_ethertype_cmd_set(payload, ethertype_cmd);
	mlxsw_afa_vlan_ethertype_set(payload, ethertype);
}

int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
				       u16 vid, u8 pcp, u8 et,
				       struct netlink_ext_ack *extack)
{
	char *act = mlxsw_afa_block_append_action(block,
						  MLXSW_AFA_VLAN_CODE,
						  MLXSW_AFA_VLAN_SIZE);

	if (IS_ERR(act)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot append vlan_modify action");
		return PTR_ERR(act);
	}
	mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
			    MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
			    MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
			    MLXSW_AFA_VLAN_CMD_SET_OUTER, et);
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);

/* Trap Action / Trap With Userdef Action
 * --------------------------------------
 * The Trap action enables trapping / mirroring packets to the CPU
 * as well as discarding packets.
 * The ACL Trap / Discard separates the forward/discard control from CPU
 * trap control. In addition, the Trap / Discard action enables activating
 * SPAN (port mirroring).
 *
 * The Trap with userdef action has the same functionality as the Trap
 * action, with the addition of a user defined value that can be set
 * and used by higher layer applications.
 */

#define MLXSW_AFA_TRAP_CODE 0x03
#define MLXSW_AFA_TRAP_SIZE 1

#define MLXSW_AFA_TRAPWU_CODE 0x04
#define MLXSW_AFA_TRAPWU_SIZE 2

enum mlxsw_afa_trap_trap_action {
	MLXSW_AFA_TRAP_TRAP_ACTION_NOP = 0,
	MLXSW_AFA_TRAP_TRAP_ACTION_TRAP = 2,
};

/* afa_trap_trap_action
 * Trap Action.
 */
MLXSW_ITEM32(afa, trap, trap_action, 0x00, 24, 4);

enum mlxsw_afa_trap_forward_action {
	MLXSW_AFA_TRAP_FORWARD_ACTION_FORWARD = 1,
	MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD = 3,
};

/* afa_trap_forward_action
 * Forward Action.
 */
MLXSW_ITEM32(afa, trap, forward_action, 0x00, 0, 4);

/* afa_trap_trap_id
 * Trap ID to configure.
 */
MLXSW_ITEM32(afa, trap, trap_id, 0x04, 0, 9);

/* afa_trap_mirror_agent
 * Mirror agent.
 */
MLXSW_ITEM32(afa, trap, mirror_agent, 0x08, 29, 3);

/* afa_trap_mirror_enable
 * Mirror enable.
 */
MLXSW_ITEM32(afa, trap, mirror_enable, 0x08, 24, 1);

/* afa_trap_user_def_val
 * Value for SW usage. Can be used to pass information about which
 * rule has caused a trap. This may be overwritten by later traps.
 * This field does a set on the packet's user_def_val only if this
 * is the first trap_id or if the trap_id has replaced the previous
 * packet's trap_id.
 */
MLXSW_ITEM32(afa, trap, user_def_val, 0x0C, 0, 20);

static inline void
mlxsw_afa_trap_pack(char *payload,
		    enum mlxsw_afa_trap_trap_action trap_action,
		    enum mlxsw_afa_trap_forward_action forward_action,
		    u16 trap_id)
{
	mlxsw_afa_trap_trap_action_set(payload, trap_action);
	mlxsw_afa_trap_forward_action_set(payload, forward_action);
	mlxsw_afa_trap_trap_id_set(payload, trap_id);
}

static inline void
mlxsw_afa_trapwu_pack(char *payload,
		      enum mlxsw_afa_trap_trap_action trap_action,
		      enum mlxsw_afa_trap_forward_action forward_action,
		      u16 trap_id, u32 user_def_val)
{
	mlxsw_afa_trap_pack(payload, trap_action, forward_action, trap_id);
	mlxsw_afa_trap_user_def_val_set(payload, user_def_val);
}

static inline void
mlxsw_afa_trap_mirror_pack(char *payload, bool mirror_enable,
			   u8 mirror_agent)
{
	mlxsw_afa_trap_mirror_enable_set(payload, mirror_enable);
	mlxsw_afa_trap_mirror_agent_set(payload, mirror_agent);
}

static int mlxsw_afa_block_append_drop_plain(struct mlxsw_afa_block *block,
					     bool ingress)
{
	char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAP_CODE,
						  MLXSW_AFA_TRAP_SIZE);

	if (IS_ERR(act))
		return PTR_ERR(act);
	mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
			    MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD,
			    ingress ? MLXSW_TRAP_ID_DISCARD_INGRESS_ACL :
				      MLXSW_TRAP_ID_DISCARD_EGRESS_ACL);
	return 0;
}

static int
mlxsw_afa_block_append_drop_with_cookie(struct mlxsw_afa_block *block,
					bool ingress,
					const struct flow_action_cookie *fa_cookie,
					struct netlink_ext_ack *extack)
{
	struct mlxsw_afa_cookie_ref *cookie_ref;
	u32 cookie_index;
	char *act;
	int err;

	cookie_ref = mlxsw_afa_cookie_ref_create(block, fa_cookie);
	if (IS_ERR(cookie_ref)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot create cookie for drop action");
		return PTR_ERR(cookie_ref);
	}
	cookie_index = cookie_ref->cookie->cookie_index;

	act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAPWU_CODE,
					    MLXSW_AFA_TRAPWU_SIZE);
	if (IS_ERR(act)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot append drop with cookie action");
		err = PTR_ERR(act);
		goto err_append_action;
	}
	mlxsw_afa_trapwu_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
			      MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD,
			      ingress ? MLXSW_TRAP_ID_DISCARD_INGRESS_ACL :
					MLXSW_TRAP_ID_DISCARD_EGRESS_ACL,
			      cookie_index);
	return 0;

err_append_action:
	mlxsw_afa_cookie_ref_destroy(block, cookie_ref);
	return err;
}

int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block, bool ingress,
				const struct flow_action_cookie *fa_cookie,
				struct netlink_ext_ack *extack)
{
	return fa_cookie ?
	       mlxsw_afa_block_append_drop_with_cookie(block, ingress,
						       fa_cookie, extack) :
	       mlxsw_afa_block_append_drop_plain(block, ingress);
}
EXPORT_SYMBOL(mlxsw_afa_block_append_drop);

int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
{
	char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAP_CODE,
						  MLXSW_AFA_TRAP_SIZE);

	if (IS_ERR(act))
		return PTR_ERR(act);
	mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
			    MLXSW_AFA_TRAP_FORWARD_ACTION_DISCARD, trap_id);
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap);

int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
					    u16 trap_id)
{
	char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAP_CODE,
						  MLXSW_AFA_TRAP_SIZE);

	if (IS_ERR(act))
		return PTR_ERR(act);
	mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_TRAP,
			    MLXSW_AFA_TRAP_FORWARD_ACTION_FORWARD, trap_id);
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);

struct mlxsw_afa_mirror {
	struct mlxsw_afa_resource resource;
	int span_id;
	u8 local_in_port;
	bool ingress;
};

static void
mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
			 struct mlxsw_afa_mirror *mirror)
{
	mlxsw_afa_resource_del(&mirror->resource);
	block->afa->ops->mirror_del(block->afa->ops_priv,
				    mirror->local_in_port,
				    mirror->span_id,
				    mirror->ingress);
	kfree(mirror);
}

static void
mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
			    struct mlxsw_afa_resource *resource)
{
	struct mlxsw_afa_mirror *mirror;

	mirror = container_of(resource, struct mlxsw_afa_mirror, resource);
	mlxsw_afa_mirror_destroy(block, mirror);
}

static struct mlxsw_afa_mirror *
mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u8 local_in_port,
			const struct net_device *out_dev, bool ingress)
{
	struct mlxsw_afa_mirror *mirror;
	int err;

	mirror = kzalloc(sizeof(*mirror), GFP_KERNEL);
	if (!mirror)
		return ERR_PTR(-ENOMEM);

	err = block->afa->ops->mirror_add(block->afa->ops_priv,
					  local_in_port, out_dev,
					  ingress, &mirror->span_id);
	if (err)
		goto err_mirror_add;

	mirror->ingress = ingress;
	mirror->local_in_port = local_in_port;
	mirror->resource.destructor = mlxsw_afa_mirror_destructor;
	mlxsw_afa_resource_add(block, &mirror->resource);
	return mirror;

err_mirror_add:
	kfree(mirror);
	return ERR_PTR(err);
}

static int
mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
					u8 mirror_agent)
{
	char *act = mlxsw_afa_block_append_action(block,
						  MLXSW_AFA_TRAP_CODE,
						  MLXSW_AFA_TRAP_SIZE);
	if (IS_ERR(act))
		return PTR_ERR(act);
	mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_NOP,
			    MLXSW_AFA_TRAP_FORWARD_ACTION_FORWARD, 0);
	mlxsw_afa_trap_mirror_pack(act, true, mirror_agent);
	return 0;
}

int
mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port,
			      const struct net_device *out_dev, bool ingress,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_afa_mirror *mirror;
	int err;

	mirror = mlxsw_afa_mirror_create(block, local_in_port, out_dev,
					 ingress);
	if (IS_ERR(mirror)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot create mirror action");
		return PTR_ERR(mirror);
	}
	err = mlxsw_afa_block_append_allocated_mirror(block, mirror->span_id);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot append mirror action");
		goto err_append_allocated_mirror;
	}

	return 0;

err_append_allocated_mirror:
	mlxsw_afa_mirror_destroy(block, mirror);
	return err;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_mirror);

/* QoS Action
 * ----------
 * The QOS_ACTION is used for manipulating the QoS attributes of a packet. It
 * can be used to change the DSCP, ECN, Color and Switch Priority of the packet.
 * Note that the PCP field can be changed using the VLAN action.
 */

#define MLXSW_AFA_QOS_CODE 0x06
#define MLXSW_AFA_QOS_SIZE 1

enum mlxsw_afa_qos_ecn_cmd {
	/* Do nothing */
	MLXSW_AFA_QOS_ECN_CMD_NOP,
	/* Set ECN to afa_qos_ecn */
	MLXSW_AFA_QOS_ECN_CMD_SET,
};

/* afa_qos_ecn_cmd
 */
MLXSW_ITEM32(afa, qos, ecn_cmd, 0x04, 29, 3);

/* afa_qos_ecn
 * ECN value.
 */
MLXSW_ITEM32(afa, qos, ecn, 0x04, 24, 2);

enum mlxsw_afa_qos_dscp_cmd {
	/* Do nothing */
	MLXSW_AFA_QOS_DSCP_CMD_NOP,
	/* Set DSCP 3 LSB bits according to dscp[2:0] */
	MLXSW_AFA_QOS_DSCP_CMD_SET_3LSB,
	/* Set DSCP 3 MSB bits according to dscp[5:3] */
	MLXSW_AFA_QOS_DSCP_CMD_SET_3MSB,
	/* Set DSCP 6 bits according to dscp[5:0] */
	MLXSW_AFA_QOS_DSCP_CMD_SET_ALL,
};

/* afa_qos_dscp_cmd
 * DSCP command.
 */
MLXSW_ITEM32(afa, qos, dscp_cmd, 0x04, 14, 2);

/* afa_qos_dscp
 * DSCP value.
 */
MLXSW_ITEM32(afa, qos, dscp, 0x04, 0, 6);

enum mlxsw_afa_qos_switch_prio_cmd {
	/* Do nothing */
	MLXSW_AFA_QOS_SWITCH_PRIO_CMD_NOP,
	/* Set Switch Priority to afa_qos_switch_prio */
	MLXSW_AFA_QOS_SWITCH_PRIO_CMD_SET,
};

/* afa_qos_switch_prio_cmd
 */
MLXSW_ITEM32(afa, qos, switch_prio_cmd, 0x08, 14, 2);

/* afa_qos_switch_prio
 * Switch Priority.
 */
MLXSW_ITEM32(afa, qos, switch_prio, 0x08, 0, 4);

enum mlxsw_afa_qos_dscp_rw {
	MLXSW_AFA_QOS_DSCP_RW_PRESERVE,
	MLXSW_AFA_QOS_DSCP_RW_SET,
	MLXSW_AFA_QOS_DSCP_RW_CLEAR,
};

/* afa_qos_dscp_rw
 * DSCP Re-write Enable. Controlling the rewrite_enable for DSCP.
 */
MLXSW_ITEM32(afa, qos, dscp_rw, 0x0C, 30, 2);

static inline void
mlxsw_afa_qos_ecn_pack(char *payload,
		       enum mlxsw_afa_qos_ecn_cmd ecn_cmd, u8 ecn)
{
	mlxsw_afa_qos_ecn_cmd_set(payload, ecn_cmd);
	mlxsw_afa_qos_ecn_set(payload, ecn);
}

static inline void
mlxsw_afa_qos_dscp_pack(char *payload,
			enum mlxsw_afa_qos_dscp_cmd dscp_cmd, u8 dscp)
{
	mlxsw_afa_qos_dscp_cmd_set(payload, dscp_cmd);
	mlxsw_afa_qos_dscp_set(payload, dscp);
}

static inline void
mlxsw_afa_qos_switch_prio_pack(char *payload,
			       enum mlxsw_afa_qos_switch_prio_cmd prio_cmd,
			       u8 prio)
{
	mlxsw_afa_qos_switch_prio_cmd_set(payload, prio_cmd);
	mlxsw_afa_qos_switch_prio_set(payload, prio);
}

static int __mlxsw_afa_block_append_qos_dsfield(struct mlxsw_afa_block *block,
						bool set_dscp, u8 dscp,
						bool set_ecn, u8 ecn,
						struct netlink_ext_ack *extack)
{
	char *act = mlxsw_afa_block_append_action(block,
						  MLXSW_AFA_QOS_CODE,
						  MLXSW_AFA_QOS_SIZE);

	if (IS_ERR(act)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot append QOS action");
		return PTR_ERR(act);
	}

	if (set_ecn)
		mlxsw_afa_qos_ecn_pack(act, MLXSW_AFA_QOS_ECN_CMD_SET, ecn);
	if (set_dscp) {
		mlxsw_afa_qos_dscp_pack(act, MLXSW_AFA_QOS_DSCP_CMD_SET_ALL,
					dscp);
		mlxsw_afa_qos_dscp_rw_set(act, MLXSW_AFA_QOS_DSCP_RW_CLEAR);
	}

	return 0;
}

int mlxsw_afa_block_append_qos_dsfield(struct mlxsw_afa_block *block,
				       u8 dsfield,
				       struct netlink_ext_ack *extack)
{
	return __mlxsw_afa_block_append_qos_dsfield(block,
						    true, dsfield >> 2,
						    true, dsfield & 0x03,
						    extack);
}
EXPORT_SYMBOL(mlxsw_afa_block_append_qos_dsfield);
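
/* A worked example of the dsfield split above (illustrative only): the DS
 * field carries the DSCP in its upper six bits and the ECN in its lower
 * two bits, so dsfield 0xb9 yields dscp = 0xb9 >> 2 = 46 (EF) and
 * ecn = 0xb9 & 0x03 = 1.
 */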

int mlxsw_afa_block_append_qos_dscp(struct mlxsw_afa_block *block,
				    u8 dscp, struct netlink_ext_ack *extack)
{
	return __mlxsw_afa_block_append_qos_dsfield(block,
						    true, dscp,
						    false, 0,
						    extack);
}
EXPORT_SYMBOL(mlxsw_afa_block_append_qos_dscp);

int mlxsw_afa_block_append_qos_ecn(struct mlxsw_afa_block *block,
				   u8 ecn, struct netlink_ext_ack *extack)
{
	return __mlxsw_afa_block_append_qos_dsfield(block,
						    false, 0,
						    true, ecn,
						    extack);
}
EXPORT_SYMBOL(mlxsw_afa_block_append_qos_ecn);

int mlxsw_afa_block_append_qos_switch_prio(struct mlxsw_afa_block *block,
					   u8 prio,
					   struct netlink_ext_ack *extack)
{
	char *act = mlxsw_afa_block_append_action(block,
						  MLXSW_AFA_QOS_CODE,
						  MLXSW_AFA_QOS_SIZE);

	if (IS_ERR(act)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot append QOS action");
		return PTR_ERR(act);
	}
	mlxsw_afa_qos_switch_prio_pack(act, MLXSW_AFA_QOS_SWITCH_PRIO_CMD_SET,
				       prio);
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_qos_switch_prio);

/* Forwarding Action
 * -----------------
 * Forwarding Action can be used to implement Policy Based Switching (PBS)
 * as well as the OpenFlow-related "Output" action.
 */

#define MLXSW_AFA_FORWARD_CODE 0x07
#define MLXSW_AFA_FORWARD_SIZE 1

enum mlxsw_afa_forward_type {
	/* PBS, Policy Based Switching */
	MLXSW_AFA_FORWARD_TYPE_PBS,
	/* Output, OpenFlow output type */
	MLXSW_AFA_FORWARD_TYPE_OUTPUT,
};

/* afa_forward_type */
MLXSW_ITEM32(afa, forward, type, 0x00, 24, 2);

/* afa_forward_pbs_ptr
 * A pointer to the PBS entry configured by PPBS register.
 * Reserved when in_port is set.
 */
MLXSW_ITEM32(afa, forward, pbs_ptr, 0x08, 0, 24);

/* afa_forward_in_port
 * Packet is forwarded back to the ingress port.
 */
MLXSW_ITEM32(afa, forward, in_port, 0x0C, 0, 1);

static inline void
mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type,
		       u32 pbs_ptr, bool in_port)
{
	mlxsw_afa_forward_type_set(payload, type);
	mlxsw_afa_forward_pbs_ptr_set(payload, pbs_ptr);
	mlxsw_afa_forward_in_port_set(payload, in_port);
}

int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
			       u8 local_port, bool in_port,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
	u32 kvdl_index;
	char *act;
	int err;

	if (in_port) {
		NL_SET_ERR_MSG_MOD(extack, "Forwarding to ingress port is not supported");
		return -EOPNOTSUPP;
	}
	fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port);
	if (IS_ERR(fwd_entry_ref)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot create forward action");
		return PTR_ERR(fwd_entry_ref);
	}
	kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index;

	act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
					    MLXSW_AFA_FORWARD_SIZE);
	if (IS_ERR(act)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot append forward action");
		err = PTR_ERR(act);
		goto err_append_action;
	}
	mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS,
			       kvdl_index, in_port);
	return 0;

err_append_action:
	mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
	return err;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);

/* Policing and Counting Action
 * ----------------------------
 * Policing and Counting action is used for binding a policer and a counter
 * to ACL rules.
 */

#define MLXSW_AFA_POLCNT_CODE 0x08
#define MLXSW_AFA_POLCNT_SIZE 1

enum mlxsw_afa_polcnt_counter_set_type {
	/* No count */
	MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
	/* Count packets and bytes */
	MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
	/* Count only packets */
	MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS = 0x05,
};

/* afa_polcnt_counter_set_type
 * Counter set type for flow counters.
 */
MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8);

/* afa_polcnt_counter_index
 * Counter index for flow counters.
 */
MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24);

static inline void
mlxsw_afa_polcnt_pack(char *payload,
		      enum mlxsw_afa_polcnt_counter_set_type set_type,
		      u32 counter_index)
{
	mlxsw_afa_polcnt_counter_set_type_set(payload, set_type);
	mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
}

int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
					     u32 counter_index)
{
	char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
						  MLXSW_AFA_POLCNT_SIZE);
	if (IS_ERR(act))
		return PTR_ERR(act);
	mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
			      counter_index);
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_allocated_counter);

int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
				   u32 *p_counter_index,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_afa_counter *counter;
	u32 counter_index;
	int err;

	counter = mlxsw_afa_counter_create(block);
	if (IS_ERR(counter)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot create count action");
		return PTR_ERR(counter);
	}
	counter_index = counter->counter_index;

	err = mlxsw_afa_block_append_allocated_counter(block, counter_index);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot append count action");
		goto err_append_allocated_counter;
	}
	if (p_counter_index)
		*p_counter_index = counter_index;
	return 0;

err_append_allocated_counter:
	mlxsw_afa_counter_destroy(block, counter);
	return err;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_counter);

/* Virtual Router and Forwarding Domain Action
 * -------------------------------------------
 * Virtual Switch action is used to manipulate the Virtual Router (VR),
 * MPLS label space and the Forwarding Identifier (FID).
 */

#define MLXSW_AFA_VIRFWD_CODE 0x0E
#define MLXSW_AFA_VIRFWD_SIZE 1

enum mlxsw_afa_virfwd_fid_cmd {
	/* Do nothing */
	MLXSW_AFA_VIRFWD_FID_CMD_NOOP,
	/* Set the Forwarding Identifier (FID) to fid */
	MLXSW_AFA_VIRFWD_FID_CMD_SET,
};

/* afa_virfwd_fid_cmd */
MLXSW_ITEM32(afa, virfwd, fid_cmd, 0x08, 29, 3);

/* afa_virfwd_fid
 * The FID value.
 */
MLXSW_ITEM32(afa, virfwd, fid, 0x08, 0, 16);

static inline void mlxsw_afa_virfwd_pack(char *payload,
					 enum mlxsw_afa_virfwd_fid_cmd fid_cmd,
					 u16 fid)
{
	mlxsw_afa_virfwd_fid_cmd_set(payload, fid_cmd);
	mlxsw_afa_virfwd_fid_set(payload, fid);
}

int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
				   struct netlink_ext_ack *extack)
{
	char *act = mlxsw_afa_block_append_action(block,
						  MLXSW_AFA_VIRFWD_CODE,
						  MLXSW_AFA_VIRFWD_SIZE);
	if (IS_ERR(act)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot append fid_set action");
		return PTR_ERR(act);
	}
	mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set);

/* MC Routing Action
 * -----------------
 * The Multicast router action. Can be used by RMFT_V2 - Router Multicast
 * Forwarding Table Version 2 Register.
 */

#define MLXSW_AFA_MCROUTER_CODE 0x10
#define MLXSW_AFA_MCROUTER_SIZE 2

enum mlxsw_afa_mcrouter_rpf_action {
	MLXSW_AFA_MCROUTER_RPF_ACTION_NOP,
	MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
	MLXSW_AFA_MCROUTER_RPF_ACTION_DISCARD_ERROR,
};

/* afa_mcrouter_rpf_action */
MLXSW_ITEM32(afa, mcrouter, rpf_action, 0x00, 28, 3);

/* afa_mcrouter_expected_irif */
MLXSW_ITEM32(afa, mcrouter, expected_irif, 0x00, 0, 16);

/* afa_mcrouter_min_mtu */
MLXSW_ITEM32(afa, mcrouter, min_mtu, 0x08, 0, 16);

enum mlxsw_afa_mrouter_vrmid {
	MLXSW_AFA_MCROUTER_VRMID_INVALID,
	MLXSW_AFA_MCROUTER_VRMID_VALID
};

/* afa_mcrouter_vrmid
 * Valid RMID: rigr_rmid_index is used as RMID
 */
MLXSW_ITEM32(afa, mcrouter, vrmid, 0x0C, 31, 1);
/* afa_mcrouter_rigr_rmid_index
 * When the vrmid field is set to invalid, the field is used as a pointer to
 * the Router Interface Group (RIGR) table in the KVD linear area.
 * When the vrmid is set to valid, the field is used as an RMID index,
 * ranging from 0 to max_mid - 1. The index is into the Port Group Table.
 */
MLXSW_ITEM32(afa, mcrouter, rigr_rmid_index, 0x0C, 0, 24);

static inline void
mlxsw_afa_mcrouter_pack(char *payload,
			enum mlxsw_afa_mcrouter_rpf_action rpf_action,
			u16 expected_irif, u16 min_mtu,
			enum mlxsw_afa_mrouter_vrmid vrmid, u32 rigr_rmid_index)

{
	mlxsw_afa_mcrouter_rpf_action_set(payload, rpf_action);
	mlxsw_afa_mcrouter_expected_irif_set(payload, expected_irif);
	mlxsw_afa_mcrouter_min_mtu_set(payload, min_mtu);
	mlxsw_afa_mcrouter_vrmid_set(payload, vrmid);
	mlxsw_afa_mcrouter_rigr_rmid_index_set(payload, rigr_rmid_index);
}

int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
				    u16 expected_irif, u16 min_mtu,
				    bool rmid_valid, u32 kvdl_index)
{
	char *act = mlxsw_afa_block_append_action(block,
						  MLXSW_AFA_MCROUTER_CODE,
						  MLXSW_AFA_MCROUTER_SIZE);
	if (IS_ERR(act))
		return PTR_ERR(act);
	mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
				expected_irif, min_mtu, rmid_valid, kvdl_index);
	return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_mcrouter);