// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 * Copyright 2020-2022 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <net/pkt_cls.h>
#include "tc.h"
#include "tc_bindings.h"
#include "mae.h"
#include "ef100_rep.h"
#include "efx.h"

#define EFX_EFV_PF	NULL
/* Look up the representor information (efv) for a device.
 * May return NULL for the PF (us), or an error pointer for a device that
 * isn't supported as a TC offload endpoint
 */
static struct efx_rep *efx_tc_flower_lookup_efv(struct efx_nic *efx,
						struct net_device *dev)
{
	struct efx_rep *efv;

	if (!dev)
		return ERR_PTR(-EOPNOTSUPP);
	/* Is it us (the PF)? */
	if (dev == efx->net_dev)
		return EFX_EFV_PF;
	/* Is it an efx vfrep at all? */
	if (dev->netdev_ops != &efx_ef100_rep_netdev_ops)
		return ERR_PTR(-EOPNOTSUPP);
	/* Is it ours?  We don't support TC rules that include another
	 * EF100's netdevices (not even on another port of the same NIC).
	 */
	efv = netdev_priv(dev);
	if (efv->parent != efx)
		return ERR_PTR(-EOPNOTSUPP);
	return efv;
}

/* Convert a driver-internal vport ID into an external device (wire or VF) */
static s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv)
{
	u32 mport;

	if (IS_ERR(efv))
		return PTR_ERR(efv);
	if (!efv) /* device is PF (us) */
		efx_mae_mport_wire(efx, &mport);
	else /* device is repr */
		efx_mae_mport_mport(efx, efv->mport, &mport);
	return mport;
}

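/* TC rules are stored in a hashtable keyed by the (unsigned long) TC cookie,
 * so that replace/destroy requests can find the corresponding driver state.
 */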
static const struct rhashtable_params efx_tc_match_action_ht_params = {
	.key_len	= sizeof(unsigned long),
	.key_offset	= offsetof(struct efx_tc_flow_rule, cookie),
	.head_offset	= offsetof(struct efx_tc_flow_rule, linkage),
};

static void efx_tc_free_action_set(struct efx_nic *efx,
				   struct efx_tc_action_set *act, bool in_hw)
{
	/* Failure paths calling this on the 'running action' set in_hw=false,
	 * because if the alloc had succeeded we'd've put it in acts.list and
	 * not still have it in act.
	 */
	if (in_hw) {
		efx_mae_free_action_set(efx, act->fw_id);
		/* in_hw is true iff we are on an acts.list; make sure to
		 * remove ourselves from that list before we are freed.
		 */
		list_del(&act->list);
	}
	kfree(act);
}

static void efx_tc_free_action_set_list(struct efx_nic *efx,
					struct efx_tc_action_set_list *acts,
					bool in_hw)
{
	struct efx_tc_action_set *act, *next;

	/* Failure paths set in_hw=false, because usually the acts didn't get
	 * to efx_mae_alloc_action_set_list(); if they did, the failure tree
	 * has a separate efx_mae_free_action_set_list() before calling us.
	 */
	if (in_hw)
		efx_mae_free_action_set_list(efx, acts);
	/* Any act that's on the list will be in_hw even if the list isn't */
	list_for_each_entry_safe(act, next, &acts->list, list)
		efx_tc_free_action_set(efx, act, true);
	/* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */
}

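/* Remove a rule from hardware and free the hardware resources it holds
 * (its action sets and action set list).  The caller remains responsible
 * for the software state (hashtable entry and the rule itself).
 */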
static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
{
	efx_mae_delete_rule(efx, rule->fw_id);

	/* Release entries in subsidiary tables */
	efx_tc_free_action_set_list(efx, &rule->acts, true);
	rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
}

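/* rhashtable_free_and_destroy() callback: complain about, and tear down,
 * any rule that is still offloaded at driver teardown.
 */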
static void efx_tc_flow_free(void *ptr, void *arg)
{
	struct efx_tc_flow_rule *rule = ptr;
	struct efx_nic *efx = arg;

	netif_err(efx, drv, efx->net_dev,
		  "tc rule %lx still present at teardown, removing\n",
		  rule->cookie);

	efx_mae_delete_rule(efx, rule->fw_id);

	/* Release entries in subsidiary tables */
	efx_tc_free_action_set_list(efx, &rule->acts, true);

	kfree(rule);
}

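/* Build our match description from the flower rule.  The ingress port is
 * filled in by the caller; beyond that, only match-all filters can
 * currently be offloaded, so any control flags, keys other than
 * CONTROL/BASIC, or eth_proto/ip_proto matches are rejected (with extack
 * explaining why).
 */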
static int efx_tc_flower_parse_match(struct efx_nic *efx,
				     struct flow_rule *rule,
				     struct efx_tc_match *match,
				     struct netlink_ext_ack *extack)
{
	struct flow_dissector *dissector = rule->match.dissector;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control fm;

		flow_rule_match_control(rule, &fm);

		if (fm.mask->flags) {
			NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on control.flags %#x",
					       fm.mask->flags);
			return -EOPNOTSUPP;
		}
	}
	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC))) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported flower keys %#x",
				       dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic fm;

		flow_rule_match_basic(rule, &fm);
		if (fm.mask->n_proto) {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported eth_proto match");
			return -EOPNOTSUPP;
		}
		if (fm.mask->ip_proto) {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported ip_proto match");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

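/* Handle FLOW_CLS_REPLACE: parse the match and actions, allocate the MAE
 * action set(s) and action set list, then insert the rule in hardware.
 * On failure, anything already allocated is released again.
 */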
static int efx_tc_flower_replace(struct efx_nic *efx,
				 struct net_device *net_dev,
				 struct flow_cls_offload *tc,
				 struct efx_rep *efv)
{
	struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
	struct netlink_ext_ack *extack = tc->common.extack;
	struct efx_tc_flow_rule *rule = NULL, *old;
	struct efx_tc_action_set *act = NULL;
	const struct flow_action_entry *fa;
	struct efx_rep *from_efv, *to_efv;
	struct efx_tc_match match;
	s64 rc;
	int i;

	if (!tc_can_offload_extack(efx->net_dev, extack))
		return -EOPNOTSUPP;
	if (WARN_ON(!efx->tc))
		return -ENETDOWN;
	if (WARN_ON(!efx->tc->up))
		return -ENETDOWN;

	from_efv = efx_tc_flower_lookup_efv(efx, net_dev);
	if (IS_ERR(from_efv)) {
		/* Might be a tunnel decap rule from an indirect block.
		 * Support for those not implemented yet.
		 */
		return -EOPNOTSUPP;
	}

	if (efv != from_efv) {
		/* can't happen */
		NL_SET_ERR_MSG_FMT_MOD(extack, "for %s efv is %snull but from_efv is %snull (can't happen)",
				       netdev_name(net_dev), efv ? "non-" : "",
				       from_efv ? "non-" : "");
		return -EINVAL;
	}

	/* Parse match */
	memset(&match, 0, sizeof(match));
	rc = efx_tc_flower_external_mport(efx, from_efv);
	if (rc < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to identify ingress m-port");
		return rc;
	}
	match.value.ingress_port = rc;
	match.mask.ingress_port = ~0;
	rc = efx_tc_flower_parse_match(efx, fr, &match, extack);
	if (rc)
		return rc;

	if (tc->common.chain_index) {
		NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index");
		return -EOPNOTSUPP;
	}
	match.mask.recirc_id = 0xff;

	rc = efx_mae_match_check_caps(efx, &match.mask, extack);
	if (rc)
		return rc;

	rule = kzalloc(sizeof(*rule), GFP_USER);
	if (!rule)
		return -ENOMEM;
	INIT_LIST_HEAD(&rule->acts.list);
	rule->cookie = tc->cookie;
	old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
						&rule->linkage,
						efx_tc_match_action_ht_params);
	if (old) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Already offloaded rule (cookie %lx)\n", tc->cookie);
		rc = -EEXIST;
		NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
		goto release;
	}

	/* Parse actions */
	act = kzalloc(sizeof(*act), GFP_USER);
	if (!act) {
		rc = -ENOMEM;
		goto release;
	}

	flow_action_for_each(i, fa, &fr->action) {
		struct efx_tc_action_set save;

		if (!act) {
			/* more actions after a non-pipe action */
			NL_SET_ERR_MSG_MOD(extack, "Action follows non-pipe action");
			rc = -EINVAL;
			goto release;
		}

		switch (fa->id) {
		case FLOW_ACTION_DROP:
			rc = efx_mae_alloc_action_set(efx, act);
			if (rc) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (drop)");
				goto release;
			}
			list_add_tail(&act->list, &rule->acts.list);
			act = NULL; /* end of the line */
			break;
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			save = *act;
			to_efv = efx_tc_flower_lookup_efv(efx, fa->dev);
			if (IS_ERR(to_efv)) {
				NL_SET_ERR_MSG_MOD(extack, "Mirred egress device not on switch");
				rc = PTR_ERR(to_efv);
				goto release;
			}
			rc = efx_tc_flower_external_mport(efx, to_efv);
			if (rc < 0) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to identify egress m-port");
				goto release;
			}
			act->dest_mport = rc;
			act->deliver = 1;
			rc = efx_mae_alloc_action_set(efx, act);
			if (rc) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (mirred)");
				goto release;
			}
			list_add_tail(&act->list, &rule->acts.list);
			act = NULL;
			if (fa->id == FLOW_ACTION_REDIRECT)
				break; /* end of the line */
			/* Mirror, so continue on with saved act */
			act = kzalloc(sizeof(*act), GFP_USER);
			if (!act) {
				rc = -ENOMEM;
				goto release;
			}
			*act = save;
			break;
		default:
			NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u",
					       fa->id);
			rc = -EOPNOTSUPP;
			goto release;
		}
	}

	if (act) {
		/* Not shot/redirected, so deliver to default dest */
		if (from_efv == EFX_EFV_PF)
			/* Rule applies to traffic from the wire,
			 * and default dest is thus the PF
			 */
			efx_mae_mport_uplink(efx, &act->dest_mport);
		else
			/* Representor, so rule applies to traffic from
			 * representee, and default dest is thus the rep.
			 * All reps use the same mport for delivery
			 */
			efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
					    &act->dest_mport);
		act->deliver = 1;
		rc = efx_mae_alloc_action_set(efx, act);
		if (rc) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (deliver)");
			goto release;
		}
		list_add_tail(&act->list, &rule->acts.list);
		act = NULL; /* Prevent double-free in error path */
	}

	netif_dbg(efx, drv, efx->net_dev,
		  "Successfully parsed filter (cookie %lx)\n",
		  tc->cookie);

	rule->match = match;

	rc = efx_mae_alloc_action_set_list(efx, &rule->acts);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to write action set list to hw");
		goto release;
	}
	rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC,
				 rule->acts.fw_id, &rule->fw_id);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw");
		goto release_acts;
	}
	return 0;

release_acts:
	efx_mae_free_action_set_list(efx, &rule->acts);
release:
	/* We failed to insert the rule, so free up any entries we created in
	 * subsidiary tables.
	 */
	if (act)
		efx_tc_free_action_set(efx, act, false);
	if (rule) {
		rhashtable_remove_fast(&efx->tc->match_action_ht,
				       &rule->linkage,
				       efx_tc_match_action_ht_params);
		efx_tc_free_action_set_list(efx, &rule->acts, false);
	}
	kfree(rule);
	return rc;
}

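/* Handle FLOW_CLS_DESTROY: look up the rule by its cookie, remove it from
 * hardware, then drop it from our hashtable and free it.
 */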
static int efx_tc_flower_destroy(struct efx_nic *efx,
				 struct net_device *net_dev,
				 struct flow_cls_offload *tc)
{
	struct netlink_ext_ack *extack = tc->common.extack;
	struct efx_tc_flow_rule *rule;

	rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie,
				      efx_tc_match_action_ht_params);
	if (!rule) {
		/* Only log a message if we're the ingress device.  Otherwise
		 * it's a foreign filter and we might just not have been
		 * interested (e.g. we might not have been the egress device
		 * either).
		 */
		if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
			netif_warn(efx, drv, efx->net_dev,
				   "Filter %lx not found to remove\n", tc->cookie);
		NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
		return -ENOENT;
	}

	/* Remove it from HW */
	efx_tc_delete_rule(efx, rule);
	/* Delete it from SW */
	rhashtable_remove_fast(&efx->tc->match_action_ht, &rule->linkage,
			       efx_tc_match_action_ht_params);
	netif_dbg(efx, drv, efx->net_dev, "Removed filter %lx\n", rule->cookie);
	kfree(rule);
	return 0;
}

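/* Entry point from the TC block binding code.  efv is the representor the
 * block is bound to, or NULL (EFX_EFV_PF) for the PF.  Dispatches on the
 * flower command with the TC mutex held.
 */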
int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
		  struct flow_cls_offload *tc, struct efx_rep *efv)
{
	int rc;

	if (!efx->tc)
		return -EOPNOTSUPP;

	mutex_lock(&efx->tc->mutex);
	switch (tc->command) {
	case FLOW_CLS_REPLACE:
		rc = efx_tc_flower_replace(efx, net_dev, tc, efv);
		break;
	case FLOW_CLS_DESTROY:
		rc = efx_tc_flower_destroy(efx, net_dev, tc);
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&efx->tc->mutex);
	return rc;
}

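/* Install a default rule delivering all traffic from ing_port to eg_port.
 * These rules use EFX_TC_PRIO_DFLT, a separate priority from TC-inserted
 * rules (EFX_TC_PRIO_TC), so they act as the fallback when no TC rule
 * matches.
 */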
static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
					 u32 eg_port, struct efx_tc_flow_rule *rule)
{
	struct efx_tc_action_set_list *acts = &rule->acts;
	struct efx_tc_match *match = &rule->match;
	struct efx_tc_action_set *act;
	int rc;

	match->value.ingress_port = ing_port;
	match->mask.ingress_port = ~0;
	act = kzalloc(sizeof(*act), GFP_KERNEL);
	if (!act)
		return -ENOMEM;
	act->deliver = 1;
	act->dest_mport = eg_port;
	rc = efx_mae_alloc_action_set(efx, act);
	if (rc)
		goto fail1;
	EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
	list_add_tail(&act->list, &acts->list);
	rc = efx_mae_alloc_action_set_list(efx, acts);
	if (rc)
		goto fail2;
	rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT,
				 acts->fw_id, &rule->fw_id);
	if (rc)
		goto fail3;
	return 0;
fail3:
	efx_mae_free_action_set_list(efx, acts);
fail2:
	list_del(&act->list);
	efx_mae_free_action_set(efx, act->fw_id);
fail1:
	kfree(act);
	return rc;
}

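/* Default rules for the PF and the physical port: traffic from the PF
 * netdevice goes out on the wire, and traffic arriving from the wire is
 * delivered to the PF.
 */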
static int efx_tc_configure_default_rule_pf(struct efx_nic *efx)
{
	struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf;
	u32 ing_port, eg_port;

	efx_mae_mport_uplink(efx, &ing_port);
	efx_mae_mport_wire(efx, &eg_port);
	return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
}

static int efx_tc_configure_default_rule_wire(struct efx_nic *efx)
{
	struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire;
	u32 ing_port, eg_port;

	efx_mae_mport_wire(efx, &ing_port);
	efx_mae_mport_uplink(efx, &eg_port);
	return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
}

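/* Default rule for a representee: its traffic is delivered to the shared
 * representors' m-port, where the catch-all filters installed by
 * efx_tc_insert_rep_filters() pick it up on the PF so it can be passed to
 * the corresponding representor netdevice.
 */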
int efx_tc_configure_default_rule_rep(struct efx_rep *efv)
{
	struct efx_tc_flow_rule *rule = &efv->dflt;
	struct efx_nic *efx = efv->parent;
	u32 ing_port, eg_port;

	efx_mae_mport_mport(efx, efv->mport, &ing_port);
	efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
	return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
}

void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
				     struct efx_tc_flow_rule *rule)
{
	if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL)
		efx_tc_delete_rule(efx, rule);
	rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
}

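/* Allocate the m-port used to deliver representee traffic to the PF.  Both
 * the m-port ID (used when building default rules) and the corresponding
 * m-port selector (used as the v-port ID for the RX filters below) are
 * recorded in efx->tc.
 */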
static int efx_tc_configure_rep_mport(struct efx_nic *efx)
{
	u32 rep_mport_label;
	int rc;

	rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label);
	if (rc)
		return rc;
	pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n",
		efx->tc->reps_mport_id, rep_mport_label);
	/* Use mport *selector* as vport ID */
	efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
			    &efx->tc->reps_mport_vport_id);
	return 0;
}

static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx)
{
	efx_mae_free_mport(efx, efx->tc->reps_mport_id);
	efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL;
}

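/* Install catch-all unicast-default and multicast-default RX filters on the
 * representors' v-port, so that traffic delivered to that m-port reaches
 * the PF's RX path.  The returned filter IDs are kept so the filters can
 * be removed again in efx_tc_remove_rep_filters().
 */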
int efx_tc_insert_rep_filters(struct efx_nic *efx)
{
	struct efx_filter_spec promisc, allmulti;
	int rc;

	if (efx->type->is_vf)
		return 0;
	if (!efx->tc)
		return 0;
	efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0);
	efx_filter_set_uc_def(&promisc);
	efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id);
	rc = efx_filter_insert_filter(efx, &promisc, false);
	if (rc < 0)
		return rc;
	efx->tc->reps_filter_uc = rc;
	efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0);
	efx_filter_set_mc_def(&allmulti);
	efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id);
	rc = efx_filter_insert_filter(efx, &allmulti, false);
	if (rc < 0)
		return rc;
	efx->tc->reps_filter_mc = rc;
	return 0;
}

void efx_tc_remove_rep_filters(struct efx_nic *efx)
{
	if (efx->type->is_vf)
		return;
	if (!efx->tc)
		return;
	if (efx->tc->reps_filter_mc >= 0)
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc);
	efx->tc->reps_filter_mc = -1;
	if (efx->tc->reps_filter_uc >= 0)
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc);
	efx->tc->reps_filter_uc = -1;
}

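/* Bring up TC offload: query MAE capabilities, install the default rules,
 * set up the representors' m-port, and register for indirect (tunnel
 * netdev) block callbacks.
 */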
int efx_init_tc(struct efx_nic *efx)
{
	int rc;

	rc = efx_mae_get_caps(efx, efx->tc->caps);
	if (rc)
		return rc;
	if (efx->tc->caps->match_field_count > MAE_NUM_FIELDS)
		/* Firmware supports some match fields the driver doesn't know
		 * about.  Not fatal, unless any of those fields are required
		 * (MAE_FIELD_SUPPORTED_MATCH_ALWAYS) but if so we don't know.
		 */
		netif_warn(efx, probe, efx->net_dev,
			   "FW reports additional match fields %u\n",
			   efx->tc->caps->match_field_count);
	if (efx->tc->caps->action_prios < EFX_TC_PRIO__NUM) {
		netif_err(efx, probe, efx->net_dev,
			  "Too few action prios supported (have %u, need %u)\n",
			  efx->tc->caps->action_prios, EFX_TC_PRIO__NUM);
		return -EIO;
	}
	rc = efx_tc_configure_default_rule_pf(efx);
	if (rc)
		return rc;
	rc = efx_tc_configure_default_rule_wire(efx);
	if (rc)
		return rc;
	rc = efx_tc_configure_rep_mport(efx);
	if (rc)
		return rc;
	efx->tc->up = true;
	rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
	if (rc)
		return rc;
	return 0;
}

void efx_fini_tc(struct efx_nic *efx)
{
	/* We can get called even if efx_init_struct_tc() failed */
	if (!efx->tc)
		return;
	if (efx->tc->up)
		flow_indr_dev_unregister(efx_tc_indr_setup_cb, efx, efx_tc_block_unbind);
	efx_tc_deconfigure_rep_mport(efx);
	efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
	efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
	efx->tc->up = false;
}

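/* Allocate and initialise the driver's TC state (efx->tc).  Purely software
 * setup; the hardware is not touched until efx_init_tc().
 */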
int efx_init_struct_tc(struct efx_nic *efx)
{
	int rc;

	if (efx->type->is_vf)
		return 0;

	efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
	if (!efx->tc)
		return -ENOMEM;
	efx->tc->caps = kzalloc(sizeof(struct mae_caps), GFP_KERNEL);
	if (!efx->tc->caps) {
		rc = -ENOMEM;
		goto fail_alloc_caps;
	}
	INIT_LIST_HEAD(&efx->tc->block_list);

	mutex_init(&efx->tc->mutex);
	rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params);
	if (rc < 0)
		goto fail_match_action_ht;
	efx->tc->reps_filter_uc = -1;
	efx->tc->reps_filter_mc = -1;
	INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
	efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
	INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
	efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
	return 0;
fail_match_action_ht:
	mutex_destroy(&efx->tc->mutex);
	kfree(efx->tc->caps);
fail_alloc_caps:
	kfree(efx->tc);
	efx->tc = NULL;
	return rc;
}

void efx_fini_struct_tc(struct efx_nic *efx)
{
	if (!efx->tc)
		return;

	mutex_lock(&efx->tc->mutex);
	EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
			     MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
	EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
			     MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
	rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free,
				    efx);
	mutex_unlock(&efx->tc->mutex);
	mutex_destroy(&efx->tc->mutex);
	kfree(efx->tc->caps);
	kfree(efx->tc);
	efx->tc = NULL;
}