// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "otx2_common.h"

/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT		0x0FULL
#define MAX_BURST_MANTISSA		0xFFULL
#define MAX_BURST_SIZE			130816ULL
#define MAX_RATE_DIVIDER_EXPONENT	12ULL
#define MAX_RATE_EXPONENT		0x0FULL
#define MAX_RATE_MANTISSA		0xFFULL
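
/* MAX_BURST_SIZE follows from the burst encoding used below:
 * ((256 + MAX_BURST_MANTISSA) << (1 + MAX_BURST_EXPONENT)) / 256
 *	= (256 + 255) * 65536 / 256 = 130,816 bytes.
 */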

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA		GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT		GENMASK_ULL(12, 9)
#define TLX_RATE_DIVIDER_EXPONENT	GENMASK_ULL(16, 13)
#define TLX_BURST_MANTISSA		GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT		GENMASK_ULL(40, 37)

struct otx2_tc_flow_stats {
	u64 bytes;
	u64 pkts;
	u64 used;
};

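/* One node per offloaded flower rule, looked up by tc cookie via the
 * rhashtable configured in tc_flow_ht_params at the end of this file.
 */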
struct otx2_tc_flow {
	struct rhash_head		node;
	unsigned long			cookie;
	u16				entry;
	unsigned int			bitpos;
	struct rcu_head			rcu;
	struct otx2_tc_flow_stats	stats;
	spinlock_t			lock; /* lock for stats */
};

static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
				      u32 *burst_mantissa)
{
	unsigned int tmp;

	/* Burst is calculated as
	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
	 * Max supported burst size is 130,816 bytes.
	 */
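	/* Worked example: burst = 6144 gives burst_exp = ilog2(6144) - 1 = 11
	 * and burst_mantissa = (6144 - 4096) / (1 << (11 - 7)) = 128, which
	 * the hardware decodes back to ((256 + 128) << (1 + 11)) / 256 = 6144.
	 */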
	burst = min_t(u32, burst, MAX_BURST_SIZE);
	if (burst) {
		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
		tmp = burst - rounddown_pow_of_two(burst);
		/* Use '<=' so that burst == 255 does not reach the divide
		 * path below with a negative shift count.
		 */
		if (burst <= MAX_BURST_MANTISSA)
			*burst_mantissa = tmp * 2;
		else
			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
	} else {
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = MAX_BURST_MANTISSA;
	}
}

static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	unsigned int tmp;

	/* Rate calculation by hardware
	 *
	 * PIR_ADD = ((256 + mantissa) << exp) / 256
	 * rate = (2 * PIR_ADD) / (1 << div_exp)
	 * The resultant rate is in Mbps.
	 */

	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
	 * Setting this to '0' will ease the calculation of
	 * exponent and mantissa.
	 */
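	/* Worked example: maxrate = 5000 Mbps gives exp = ilog2(5000) - 1 = 11
	 * and mantissa = (5000 - 4096) / (1 << (11 - 7)) = 56, which the
	 * hardware decodes back to 2 * ((256 + 56) << 11) / 256 = 4992 Mbps;
	 * the shortfall is quantization of the 8-bit mantissa.
	 */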
	*div_exp = 0;

	if (maxrate) {
		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
		tmp = maxrate - rounddown_pow_of_two(maxrate);
		/* As above, '<=' avoids a negative shift for maxrate == 255 */
		if (maxrate <= MAX_RATE_MANTISSA)
			*mantissa = tmp * 2;
		else
			*mantissa = tmp / (1ULL << (*exp - 7));
	} else {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
	}
}

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
					 u32 burst, u32 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
			 FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}

static int otx2_tc_validate_flow(struct otx2_nic *nic,
				 struct flow_action *actions,
				 struct netlink_ext_ack *extack)
{
	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload supports only 1 policing action");
		return -EINVAL;
	}
	return 0;
}

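/* Exercised by a clsact egress matchall police filter, e.g.
 * (illustrative commands; "eth0" is a placeholder):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 egress matchall \
 *		action police rate 1gbit burst 64k
 */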
static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one Egress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		if (entry->police.rate_pkt_ps) {
			NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second");
			return -EOPNOTSUPP;
		}
		/* Convert bytes per second to Mbps; do the math in 64 bits
		 * since rate_bytes_ps * 8 overflows u32 above ~4 Gbit/s.
		 */
		rate = entry->police.rate_bytes_ps * 8;
		rate = max_t(u64, rate / 1000000, 1);
		err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action is supported with Egress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

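	/* burst == 0 and maxrate == 0 program the maximum encodings in
	 * otx2_get_egress_{burst,rate}_cfg(), i.e. the shaper is left at
	 * its highest rate rather than being disabled outright.
	 */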
	err = otx2_set_matchall_egress_rate(nic, 0, 0);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
	return err;
}

static int otx2_tc_parse_actions(struct otx2_nic *nic,
				 struct flow_action *flow_action,
				 struct npc_install_flow_req *req)
{
	struct flow_action_entry *act;
	struct net_device *target;
	struct otx2_nic *priv;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		netdev_info(nic->netdev, "no tc actions specified\n");
		return -EINVAL;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			req->op = NIX_RX_ACTIONOP_DROP;
			return 0;
		case FLOW_ACTION_ACCEPT:
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_REDIRECT_INGRESS:
			target = act->dev;
			priv = netdev_priv(target);
			/* npc_install_flow_req doesn't support passing a target pcifunc */
			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
				netdev_info(nic->netdev,
					    "can't redirect to other pf/vf\n");
				return -EOPNOTSUPP;
			}
			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_VLAN_POP:
			req->vtag0_valid = true;
			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

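/* Builds an NPC MCAM install request from a flower classifier. A rule
 * such as the following reaches this path via FLOW_CLS_REPLACE
 * (illustrative command; "eth0" is a placeholder):
 *
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		dst_ip 10.0.0.2 ip_proto tcp dst_port 80 action drop
 */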
static int otx2_tc_prepare_flow(struct otx2_nic *nic,
				struct flow_cls_offload *f,
				struct npc_install_flow_req *req)
{
	struct flow_msg *flow_spec = &req->packet;
	struct flow_msg *flow_mask = &req->mask;
	struct flow_dissector *dissector;
	struct flow_rule *rule;
	u8 ip_proto = 0;

	rule = flow_cls_offload_flow_rule(f);
	dissector = rule->match.dissector;

	if ((dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_IP)))) {
		netdev_info(nic->netdev, "unsupported flow used key 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		/* All EtherTypes can be matched, no hw limitation */
		flow_spec->etype = match.key->n_proto;
		flow_mask->etype = match.mask->n_proto;
		req->features |= BIT_ULL(NPC_ETYPE);

		if (match.mask->ip_proto &&
		    (match.key->ip_proto != IPPROTO_TCP &&
		     match.key->ip_proto != IPPROTO_UDP &&
		     match.key->ip_proto != IPPROTO_SCTP &&
		     match.key->ip_proto != IPPROTO_ICMP &&
		     match.key->ip_proto != IPPROTO_ICMPV6)) {
			netdev_info(nic->netdev,
				    "ip_proto=0x%x not supported\n",
				    match.key->ip_proto);
			return -EOPNOTSUPP;
		}
		if (match.mask->ip_proto)
			ip_proto = match.key->ip_proto;

		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		else if (ip_proto == IPPROTO_ICMP)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
		else if (ip_proto == IPPROTO_ICMPV6)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		if (!is_zero_ether_addr(match.mask->src)) {
			netdev_err(nic->netdev, "src mac match not supported\n");
			return -EOPNOTSUPP;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
			ether_addr_copy(flow_mask->dmac,
					(u8 *)&match.mask->dst);
			req->features |= BIT_ULL(NPC_DMAC);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
		    match.mask->tos) {
			netdev_err(nic->netdev, "tos not supported\n");
			return -EOPNOTSUPP;
		}
		if (match.mask->ttl) {
			netdev_err(nic->netdev, "ttl not supported\n");
			return -EOPNOTSUPP;
		}
		flow_spec->tos = match.key->tos;
		flow_mask->tos = match.mask->tos;
		req->features |= BIT_ULL(NPC_TOS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
		u16 vlan_tci, vlan_tci_mask;

		flow_rule_match_vlan(rule, &match);

		if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
			netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
				   ntohs(match.key->vlan_tpid));
			return -EOPNOTSUPP;
		}

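		/* Rebuild the 802.1Q TCI: PCP in bits 15:13, DEI in bit 12,
		 * VID in bits 11:0.
		 */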
		if (match.mask->vlan_id ||
		    match.mask->vlan_dei ||
		    match.mask->vlan_priority) {
			vlan_tci = match.key->vlan_id |
				   match.key->vlan_dei << 12 |
				   match.key->vlan_priority << 13;

			/* Build the mask from the mask fields, not the key */
			vlan_tci_mask = match.mask->vlan_id |
					match.mask->vlan_dei << 12 |
					match.mask->vlan_priority << 13;

			flow_spec->vlan_tci = htons(vlan_tci);
			flow_mask->vlan_tci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);

		flow_spec->ip4dst = match.key->dst;
		flow_mask->ip4dst = match.mask->dst;
		req->features |= BIT_ULL(NPC_DIP_IPV4);

		flow_spec->ip4src = match.key->src;
		flow_mask->ip4src = match.mask->src;
		req->features |= BIT_ULL(NPC_SIP_IPV4);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			netdev_err(nic->netdev,
				   "Flow matching on IPv6 loopback addr is not supported\n");
			return -EOPNOTSUPP;
		}

		if (!ipv6_addr_any(&match.mask->dst)) {
			memcpy(&flow_spec->ip6dst,
			       (struct in6_addr *)&match.key->dst,
			       sizeof(flow_spec->ip6dst));
			memcpy(&flow_mask->ip6dst,
			       (struct in6_addr *)&match.mask->dst,
			       sizeof(flow_spec->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		if (!ipv6_addr_any(&match.mask->src)) {
			memcpy(&flow_spec->ip6src,
			       (struct in6_addr *)&match.key->src,
			       sizeof(flow_spec->ip6src));
			memcpy(&flow_mask->ip6src,
			       (struct in6_addr *)&match.mask->src,
			       sizeof(flow_spec->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);

		flow_spec->dport = match.key->dst;
		flow_mask->dport = match.mask->dst;
		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_DPORT_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_DPORT_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_DPORT_SCTP);

		flow_spec->sport = match.key->src;
		flow_mask->sport = match.mask->src;
		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_SPORT_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_SPORT_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_SPORT_SCTP);
	}

	return otx2_tc_parse_actions(nic, &rule->action, req);
}

static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
			   entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}
	mutex_unlock(&nic->mbox.lock);

	return 0;
}

static int otx2_tc_del_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct otx2_tc_flow *flow_node;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			   tc_flow_cmd->cookie);
		return -EINVAL;
	}

	otx2_del_mcam_flow_entry(nic, flow_node->entry);

	WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
				       &flow_node->node,
				       nic->tc_info.flow_ht_params));

	/* Release the bitmap slot before freeing the node; reading
	 * flow_node->bitpos after kfree_rcu() would be a use-after-free.
	 */
	clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
	tc_info->num_entries--;
	kfree_rcu(flow_node, rcu);

	return 0;
}

static int otx2_tc_add_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct otx2_tc_flow *new_node, *old_node;
	struct npc_install_flow_req *req;
	int rc;

	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		return -ENOMEM;

	/* If a flow exists with the same cookie, delete it first. This must
	 * happen before taking the mbox lock: deletion sends its own mailbox
	 * message and would deadlock on the same lock.
	 */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		otx2_tc_del_flow(nic, tc_flow_cmd);

	if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
		netdev_err(nic->netdev, "Not enough MCAM space to add the flow\n");
		return -ENOMEM;
	}

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	spin_lock_init(&new_node->lock);
	new_node->cookie = tc_flow_cmd->cookie;

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req) {
		rc = -ENOMEM;
		goto free_node;
	}

	rc = otx2_tc_prepare_flow(nic, tc_flow_cmd, req);
	if (rc) {
		otx2_mbox_reset(&nic->mbox.mbox, 0);
		goto free_node;
	}

	new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
					       nic->flow_cfg->tc_max_flows);
	req->channel = nic->hw.rx_chan_base;
	/* Hand out entries from the end of the reserved tc range (indices
	 * tc_flower_offset .. tc_flower_offset + tc_max_flows - 1); the
	 * '- 1' keeps bitpos 0 from indexing one past that range.
	 */
	req->entry = nic->flow_cfg->entry[nic->flow_cfg->tc_flower_offset +
					  nic->flow_cfg->tc_max_flows -
					  new_node->bitpos - 1];
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	new_node->entry = req->entry;

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		netdev_err(nic->netdev, "Failed to install MCAM flow entry\n");
		goto free_node;
	}
	mutex_unlock(&nic->mbox.lock);

	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
				    nic->tc_info.flow_ht_params);
	if (rc) {
		otx2_del_mcam_flow_entry(nic, new_node->entry);
		kfree_rcu(new_node, rcu);
		return rc;
	}

	set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
	tc_info->num_entries++;

	return 0;

free_node:
	mutex_unlock(&nic->mbox.lock);
	kfree(new_node);
	return rc;
}

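/* The MCAM counter read from the AF is cumulative, so each
 * FLOW_CLS_STATS query reports the delta since the previous query and
 * caches the new raw value in flow_node->stats.
 */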
static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
				  struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct npc_mcam_get_stats_req *req;
	struct npc_mcam_get_stats_rsp *rsp;
	struct otx2_tc_flow_stats *stats;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_node->entry;

	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&nic->mbox.lock);
		return PTR_ERR(rsp);
	}

	mutex_unlock(&nic->mbox.lock);

	if (!rsp->stat_ena)
		return -EINVAL;

	stats = &flow_node->stats;

	spin_lock(&flow_node->lock);
	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	stats->pkts = rsp->stat;
	spin_unlock(&flow_node->lock);

	return 0;
}

static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
				    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return otx2_tc_add_flow(nic, cls_flower);
	case FLOW_CLS_DESTROY:
		return otx2_tc_del_flow(nic, cls_flower);
	case FLOW_CLS_STATS:
		return otx2_tc_get_flow_stats(nic, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return otx2_setup_tc_cls_flower(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
					 struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_egress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_egress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static LIST_HEAD(otx2_block_cb_list);

static int otx2_setup_tc_block(struct net_device *netdev,
			       struct flow_block_offload *f)
{
	struct otx2_nic *nic = netdev_priv(netdev);
	flow_setup_cb_t *cb;
	bool ingress;

	if (f->block_shared)
		return -EOPNOTSUPP;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = otx2_setup_tc_block_ingress_cb;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = otx2_setup_tc_block_egress_cb;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
					  nic, nic, ingress);
}

int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return otx2_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct rhashtable_params tc_flow_ht_params = {
	.head_offset = offsetof(struct otx2_tc_flow, node),
	.key_offset = offsetof(struct otx2_tc_flow, cookie),
	.key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int otx2_init_tc(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;

	tc->flow_ht_params = tc_flow_ht_params;
	return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
}

void otx2_shutdown_tc(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;

	rhashtable_destroy(&tc->flow_table);
}