// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "cn10k.h"
#include "otx2_common.h"
#include "qos.h"

#define CN10K_MAX_BURST_MANTISSA	0x7FFFULL
#define CN10K_MAX_BURST_SIZE		8453888ULL

#define CN10K_TLX_BURST_MANTISSA	GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT	GENMASK_ULL(47, 44)
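
/* On CN10K the TL*_PIR/CIR burst fields are wider than on OcteonTx2:
 * a 15-bit mantissa at bits 43:29 and a 4-bit exponent at bits 47:44,
 * which is why the burst limits above exceed the OcteonTx2 MAX_BURST_*
 * values used elsewhere in this file.
 */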

struct otx2_tc_flow_stats {
	u64 bytes;
	u64 pkts;
	u64 used;
};

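/* Per-flow state kept by the driver; flows are keyed by the tc cookie
 * and looked up via the rhashtable in struct otx2_tc_info.
 */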
struct otx2_tc_flow {
	struct rhash_head		node;
	unsigned long			cookie;
	unsigned int			bitpos;
	struct rcu_head			rcu;
	struct otx2_tc_flow_stats	stats;
	spinlock_t			lock; /* lock for stats */
	u16				rq;
	u16				entry;
	u16				leaf_profile;
	bool				is_act_police;
};

int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;

	if (!nic->flow_cfg->max_flows)
		return 0;

	/* Max flows changed, free the existing bitmap */
	kfree(tc->tc_entries_bitmap);

	tc->tc_entries_bitmap =
			kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
				sizeof(long), GFP_KERNEL);
	if (!tc->tc_entries_bitmap) {
		netdev_err(nic->netdev,
			   "Unable to alloc TC flow entries bitmap\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);

static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
				      u32 *burst_exp, u32 *burst_mantissa)
{
	int max_burst, max_mantissa;
	unsigned int tmp;

	if (is_dev_otx2(nic->pdev)) {
		max_burst = MAX_BURST_SIZE;
		max_mantissa = MAX_BURST_MANTISSA;
	} else {
		max_burst = CN10K_MAX_BURST_SIZE;
		max_mantissa = CN10K_MAX_BURST_MANTISSA;
	}

	/* Burst is calculated as
	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
	 * Max supported burst size is 130,816 bytes on OcteonTx2 and
	 * 8,453,888 bytes on CN10K.
	 */
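	/* Worked example with the OcteonTx2 limits: for burst = 6144,
	 * ilog2(6144) = 12 so burst_exp = 11; tmp = 6144 - 4096 = 2048
	 * and, as 6144 exceeds max_mantissa (0xFF), burst_mantissa =
	 * 2048 >> (11 - 7) = 128. Substituting back:
	 * ((256 + 128) << (1 + 11)) / 256 = 6144 bytes, an exact encoding.
	 */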
	burst = min_t(u32, burst, max_burst);
	if (burst) {
		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
		tmp = burst - rounddown_pow_of_two(burst);
		if (burst < max_mantissa)
			*burst_mantissa = tmp * 2;
		else
			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
	} else {
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = max_mantissa;
	}
}

static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	u64 tmp;

	/* Rate calculation by hardware
	 *
	 * PIR_ADD = ((256 + mantissa) << exp) / 256
	 * rate = (2 * PIR_ADD) / (1 << div_exp)
	 * The resultant rate is in Mbps.
	 */
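	/* Worked example: for maxrate = 3000 Mbps, ilog2(3000) = 11 so
	 * exp = 10; tmp = 3000 - 2048 = 952 and, as 3000 exceeds
	 * MAX_RATE_MANTISSA, mantissa = 952 >> (10 - 7) = 119. Then
	 * PIR_ADD = ((256 + 119) << 10) / 256 = 1500 and the programmed
	 * rate is (2 * 1500) / (1 << 0) = 3000 Mbps.
	 */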

	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
	 * Setting this to '0' will ease the calculation of
	 * exponent and mantissa.
	 */
	*div_exp = 0;

	if (maxrate) {
		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
		tmp = maxrate - rounddown_pow_of_two(maxrate);
		if (maxrate < MAX_RATE_MANTISSA)
			*mantissa = tmp * 2;
		else
			*mantissa = tmp / (1ULL << (*exp - 7));
	} else {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
	}
}

u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
				u64 maxrate, u32 burst)
{
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	u64 regval = 0;

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

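	/* Bit 0 of the TL*_PIR register is the enable bit, hence the
	 * BIT_ULL(0) OR-ed into both encodings below.
	 */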
	if (is_dev_otx2(nic->pdev)) {
		regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
				FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
				FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
				FIELD_PREP(TLX_RATE_EXPONENT, exp) |
				FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	} else {
		regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
				FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
				FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
				FIELD_PREP(TLX_RATE_EXPONENT, exp) |
				FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	}

	return regval;
}

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
					 u32 burst, u64 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}

static int otx2_tc_validate_flow(struct otx2_nic *nic,
				 struct flow_action *actions,
				 struct netlink_ext_ack *extack)
{
	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload supports only 1 policing action");
		return -EINVAL;
	}
	return 0;
}

static int otx2_policer_validate(const struct flow_action *action,
				 const struct flow_action_entry *act,
				 struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one Egress MATCHALL ratelimiter can be offloaded");
		return -EBUSY;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		err = otx2_policer_validate(&cls->rule->action, entry, extack);
		if (err)
			return err;

		if (entry->police.rate_pkt_ps) {
			NL_SET_ERR_MSG_MOD(extack, "QoS offload doesn't support packets per second");
			return -EOPNOTSUPP;
		}
		err = otx2_set_matchall_egress_rate(nic, entry->police.burst,
						    otx2_convert_rate(entry->police.rate_bytes_ps));
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action is supported with Egress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = otx2_set_matchall_egress_rate(nic, 0, 0);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
	return err;
}

static int otx2_tc_act_set_police(struct otx2_nic *nic,
				  struct otx2_tc_flow *node,
				  struct flow_cls_offload *f,
				  u64 rate, u32 burst, u32 mark,
				  struct npc_install_flow_req *req, bool pps)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct otx2_hw *hw = &nic->hw;
	int rq_idx, rc;

	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
	if (rq_idx >= hw->rx_queues) {
		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
		return -EINVAL;
	}
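
	/* Ingress policing works by steering the matched flow to a
	 * dedicated RQ with a leaf bandwidth profile mapped to it, so
	 * each police action consumes one receive queue from rq_bmap.
	 */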
	mutex_lock(&nic->mbox.lock);

	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
	if (rc) {
		mutex_unlock(&nic->mbox.lock);
		return rc;
	}

	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
	if (rc)
		goto free_leaf;

	rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
	if (rc)
		goto free_leaf;

	mutex_unlock(&nic->mbox.lock);

	req->match_id = mark & 0xFFFFULL;
	req->index = rq_idx;
	req->op = NIX_RX_ACTIONOP_UCAST;
	set_bit(rq_idx, &nic->rq_bmap);
	node->is_act_police = true;
	node->rq = rq_idx;

	return 0;

free_leaf:
	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
		netdev_err(nic->netdev,
			   "Unable to free leaf bandwidth profile(%d)\n",
			   node->leaf_profile);
	mutex_unlock(&nic->mbox.lock);
	return rc;
}

static int otx2_tc_parse_actions(struct otx2_nic *nic,
				 struct flow_action *flow_action,
				 struct npc_install_flow_req *req,
				 struct flow_cls_offload *f,
				 struct otx2_tc_flow *node)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_action_entry *act;
	struct net_device *target;
	struct otx2_nic *priv;
	u32 burst = 0, mark = 0;
	u8 nr_police = 0;
	bool pps = false;
	u64 rate = 0;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
		return -EINVAL;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			req->op = NIX_RX_ACTIONOP_DROP;
			return 0;
		case FLOW_ACTION_ACCEPT:
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_REDIRECT_INGRESS:
			target = act->dev;
			priv = netdev_priv(target);
			/* npc_install_flow_req doesn't support passing a target pcifunc */
			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't redirect to other pf/vf");
				return -EOPNOTSUPP;
			}
			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;

			/* If op is already set, avoid overwriting it */
			if (!req->op)
				req->op = NIX_RX_ACTION_DEFAULT;
			break;

		case FLOW_ACTION_VLAN_POP:
			req->vtag0_valid = true;
			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
			break;
		case FLOW_ACTION_POLICE:
			/* Ingress ratelimiting is not supported on OcteonTx2 */
			if (is_dev_otx2(nic->pdev)) {
				NL_SET_ERR_MSG_MOD(extack,
					"Ingress policing not supported on this platform");
				return -EOPNOTSUPP;
			}

			err = otx2_policer_validate(flow_action, act, extack);
			if (err)
				return err;

			if (act->police.rate_bytes_ps > 0) {
				rate = act->police.rate_bytes_ps * 8;
				burst = act->police.burst;
			} else if (act->police.rate_pkt_ps > 0) {
				/* The algorithm used to calculate rate
				 * mantissa, exponent values for a given token
				 * rate (token can be byte or packet) requires
				 * token rate to be multiplied by 8.
				 */
				rate = act->police.rate_pkt_ps * 8;
				burst = act->police.burst_pkt;
				pps = true;
			}
			nr_police++;
			break;
		case FLOW_ACTION_MARK:
			mark = act->mark;
			break;

		case FLOW_ACTION_RX_QUEUE_MAPPING:
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = act->rx_queue;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	if (nr_police > 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "rate limit police offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (nr_police)
		return otx2_tc_act_set_police(nic, node, f, rate, burst,
					      mark, req, pps);

	return 0;
}

static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
				struct flow_cls_offload *f,
				struct npc_install_flow_req *req)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_msg *flow_spec = &req->packet;
	struct flow_msg *flow_mask = &req->mask;
	struct flow_dissector *dissector;
	struct flow_rule *rule;
	u8 ip_proto = 0;

	rule = flow_cls_offload_flow_rule(f);
	dissector = rule->match.dissector;

	if ((dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_IP)))) {
		netdev_info(nic->netdev, "unsupported flow used key 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		/* All EtherTypes can be matched, no hw limitation */
		flow_spec->etype = match.key->n_proto;
		flow_mask->etype = match.mask->n_proto;
		req->features |= BIT_ULL(NPC_ETYPE);

		if (match.mask->ip_proto &&
		    (match.key->ip_proto != IPPROTO_TCP &&
		     match.key->ip_proto != IPPROTO_UDP &&
		     match.key->ip_proto != IPPROTO_SCTP &&
		     match.key->ip_proto != IPPROTO_ICMP &&
		     match.key->ip_proto != IPPROTO_ICMPV6)) {
			netdev_info(nic->netdev,
				    "ip_proto=0x%x not supported\n",
				    match.key->ip_proto);
			return -EOPNOTSUPP;
		}
		if (match.mask->ip_proto)
			ip_proto = match.key->ip_proto;

		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		else if (ip_proto == IPPROTO_ICMP)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
		else if (ip_proto == IPPROTO_ICMPV6)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
			NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later");
			return -EOPNOTSUPP;
		}

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			if (ntohs(flow_spec->etype) == ETH_P_IP) {
				flow_spec->ip_flag = IPV4_FLAG_MORE;
				flow_mask->ip_flag = IPV4_FLAG_MORE;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
			} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
				flow_spec->next_header = IPPROTO_FRAGMENT;
				flow_mask->next_header = 0xff;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
			} else {
				NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 or IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		if (!is_zero_ether_addr(match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
			return -EOPNOTSUPP;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
			ether_addr_copy(flow_mask->dmac,
					(u8 *)&match.mask->dst);
			req->features |= BIT_ULL(NPC_DMAC);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
		    match.mask->tos) {
			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
			return -EOPNOTSUPP;
		}
		if (match.mask->ttl) {
			NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
			return -EOPNOTSUPP;
		}
		flow_spec->tos = match.key->tos;
		flow_mask->tos = match.mask->tos;
		req->features |= BIT_ULL(NPC_TOS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
		u16 vlan_tci, vlan_tci_mask;

		flow_rule_match_vlan(rule, &match);

		if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
			netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
				   ntohs(match.key->vlan_tpid));
			return -EOPNOTSUPP;
		}

		if (!match.mask->vlan_id) {
			struct flow_action_entry *act;
			int i;

			flow_action_for_each(i, act, &rule->action) {
				if (act->id == FLOW_ACTION_DROP) {
					netdev_err(nic->netdev,
						   "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
						   ntohs(match.key->vlan_tpid),
						   match.key->vlan_id);
					return -EOPNOTSUPP;
				}
			}
		}

		if (match.mask->vlan_id ||
		    match.mask->vlan_dei ||
		    match.mask->vlan_priority) {
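			/* 802.1Q TCI layout: PCP in bits 15:13, DEI in
			 * bit 12 and VID in bits 11:0, hence the 13- and
			 * 12-bit shifts below.
			 */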
			vlan_tci = match.key->vlan_id |
				   match.key->vlan_dei << 12 |
				   match.key->vlan_priority << 13;

			vlan_tci_mask = match.mask->vlan_id |
					match.mask->vlan_dei << 12 |
					match.mask->vlan_priority << 13;

			flow_spec->vlan_tci = htons(vlan_tci);
			flow_mask->vlan_tci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);

		flow_spec->ip4dst = match.key->dst;
		flow_mask->ip4dst = match.mask->dst;
		req->features |= BIT_ULL(NPC_DIP_IPV4);

		flow_spec->ip4src = match.key->src;
		flow_mask->ip4src = match.mask->src;
		req->features |= BIT_ULL(NPC_SIP_IPV4);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow matching IPv6 loopback addr not supported");
			return -EOPNOTSUPP;
		}

		if (!ipv6_addr_any(&match.mask->dst)) {
			memcpy(&flow_spec->ip6dst,
			       (struct in6_addr *)&match.key->dst,
			       sizeof(flow_spec->ip6dst));
			memcpy(&flow_mask->ip6dst,
			       (struct in6_addr *)&match.mask->dst,
			       sizeof(flow_spec->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		if (!ipv6_addr_any(&match.mask->src)) {
			memcpy(&flow_spec->ip6src,
			       (struct in6_addr *)&match.key->src,
			       sizeof(flow_spec->ip6src));
			memcpy(&flow_mask->ip6src,
			       (struct in6_addr *)&match.mask->src,
			       sizeof(flow_spec->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);

		flow_spec->dport = match.key->dst;
		flow_mask->dport = match.mask->dst;

		if (flow_mask->dport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}

		flow_spec->sport = match.key->src;
		flow_mask->sport = match.mask->src;

		if (flow_mask->sport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
	}

	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}

static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
			   entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}
	mutex_unlock(&nic->mbox.lock);

	return 0;
}

static int otx2_tc_del_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			   tc_flow_cmd->cookie);
		return -EINVAL;
	}

	if (flow_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
						 flow_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   flow_node->rq, flow_node->leaf_profile);

		err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   flow_node->leaf_profile);

		__clear_bit(flow_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}

	otx2_del_mcam_flow_entry(nic, flow_node->entry);

	WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
				       &flow_node->node,
				       nic->tc_info.flow_ht_params));
	/* Clear the bookkeeping before the node is queued for RCU freeing */
	clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
	kfree_rcu(flow_node, rcu);
	flow_cfg->nr_flows--;

	return 0;
}

static int otx2_tc_add_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct otx2_tc_flow *new_node, *old_node;
	struct npc_install_flow_req *req, dummy;
	int rc, err;

	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		return -EOPNOTSUPP;

	if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Free MCAM entry not available to add the flow");
		return -ENOMEM;
	}

	/* Allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	spin_lock_init(&new_node->lock);
	new_node->cookie = tc_flow_cmd->cookie;

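	/* Parse the flow into an on-stack request first; the real mbox
	 * message is allocated only once parsing has succeeded.
	 */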
	memset(&dummy, 0, sizeof(struct npc_install_flow_req));

	rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
	if (rc) {
		kfree_rcu(new_node, rcu);
		return rc;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		otx2_tc_del_flow(nic, tc_flow_cmd);

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		rc = -ENOMEM;
		goto free_leaf;
	}

	memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
	memcpy(req, &dummy, sizeof(struct npc_install_flow_req));

	new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
					       flow_cfg->max_flows);
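	/* tc flows take MCAM entries from the tail end of flow_ent[];
	 * presumably the lower-index (higher-priority) entries are left
	 * for the other users of the shared flow-entry pool, such as
	 * ntuple filters.
	 */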
	req->channel = nic->hw.rx_chan_base;
	req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	new_node->entry = req->entry;

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
		mutex_unlock(&nic->mbox.lock);
		goto free_leaf;
	}
	mutex_unlock(&nic->mbox.lock);

	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
				    nic->tc_info.flow_ht_params);
	if (rc) {
		otx2_del_mcam_flow_entry(nic, req->entry);
		goto free_leaf;
	}

	set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
	flow_cfg->nr_flows++;

	return 0;

free_leaf:
	if (new_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
						 new_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   new_node->rq, new_node->leaf_profile);
		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   new_node->leaf_profile);

		__clear_bit(new_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}
	/* Free the node here, after any police state is torn down, rather
	 * than at each goto site; this also plugs a leak of new_node on
	 * the mbox allocation failure path above.
	 */
	kfree_rcu(new_node, rcu);

	return rc;
}

static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
				  struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct npc_mcam_get_stats_req *req;
	struct npc_mcam_get_stats_rsp *rsp;
	struct otx2_tc_flow_stats *stats;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_node->entry;

	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
		(&nic->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&nic->mbox.lock);
		return PTR_ERR(rsp);
	}

	mutex_unlock(&nic->mbox.lock);

	if (!rsp->stat_ena)
		return -EINVAL;

	stats = &flow_node->stats;

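	/* The MCAM counter is free-running, so report only the delta
	 * since the previous query and remember the new running total.
	 */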
	spin_lock(&flow_node->lock);
	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	stats->pkts = rsp->stat;
	spin_unlock(&flow_node->lock);

	return 0;
}

static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
				    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return otx2_tc_add_flow(nic, cls_flower);
	case FLOW_CLS_DESTROY:
		return otx2_tc_del_flow(nic, cls_flower);
	case FLOW_CLS_STATS:
		return otx2_tc_get_flow_stats(nic, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
					    struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one ingress MATCHALL ratelimiter can be offloaded");
		return -EBUSY;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		/* Ingress ratelimiting is not supported on OcteonTx2 */
		if (is_dev_otx2(nic->pdev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Ingress policing not supported on this platform");
			return -EOPNOTSUPP;
		}

		err = cn10k_alloc_matchall_ipolicer(nic);
		if (err)
			return err;

		/* Convert to bits per second */
		rate = entry->police.rate_bytes_ps * 8;
		err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action supported with Ingress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = cn10k_free_matchall_ipolicer(nic);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
	return err;
}

static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_ingress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return otx2_setup_tc_cls_flower(nic, type_data);
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_ingress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
					 struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_egress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_egress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static LIST_HEAD(otx2_block_cb_list);

static int otx2_setup_tc_block(struct net_device *netdev,
			       struct flow_block_offload *f)
{
	struct otx2_nic *nic = netdev_priv(netdev);
	flow_setup_cb_t *cb;
	bool ingress;

	if (f->block_shared)
		return -EOPNOTSUPP;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = otx2_setup_tc_block_ingress_cb;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = otx2_setup_tc_block_egress_cb;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
					  nic, nic, ingress);
}

int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return otx2_setup_tc_block(netdev, type_data);
	case TC_SETUP_QDISC_HTB:
		return otx2_setup_tc_htb(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(otx2_setup_tc);

static const struct rhashtable_params tc_flow_ht_params = {
	.head_offset = offsetof(struct otx2_tc_flow, node),
	.key_offset = offsetof(struct otx2_tc_flow, cookie),
	.key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int otx2_init_tc(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;
	int err;

	/* Exclude receive queue 0 from being used for police actions */
	set_bit(0, &nic->rq_bmap);

	if (!nic->flow_cfg) {
		netdev_err(nic->netdev,
			   "Can't init TC, nic->flow_cfg is not setup\n");
		return -EINVAL;
	}

	err = otx2_tc_alloc_ent_bitmap(nic);
	if (err)
		return err;

	tc->flow_ht_params = tc_flow_ht_params;
	err = rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
	if (err) {
		kfree(tc->tc_entries_bitmap);
		tc->tc_entries_bitmap = NULL;
	}
	return err;
}
EXPORT_SYMBOL(otx2_init_tc);

void otx2_shutdown_tc(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;

	kfree(tc->tc_entries_bitmap);
	rhashtable_destroy(&tc->flow_table);
}
EXPORT_SYMBOL(otx2_shutdown_tc);