1 // SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
3  *
4  * Copyright (C) 2021 Marvell.
5  */
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/inetdevice.h>
9 #include <linux/rhashtable.h>
10 #include <linux/bitfield.h>
11 #include <net/flow_dissector.h>
12 #include <net/pkt_cls.h>
13 #include <net/tc_act/tc_gact.h>
14 #include <net/tc_act/tc_mirred.h>
15 #include <net/tc_act/tc_vlan.h>
16 #include <net/ipv6.h>
17 
18 #include "cn10k.h"
19 #include "otx2_common.h"
20 
21 /* Egress rate limiting definitions */
22 #define MAX_BURST_EXPONENT		0x0FULL
23 #define MAX_BURST_MANTISSA		0xFFULL
24 #define MAX_BURST_SIZE			130816ULL
25 #define MAX_RATE_DIVIDER_EXPONENT	12ULL
26 #define MAX_RATE_EXPONENT		0x0FULL
27 #define MAX_RATE_MANTISSA		0xFFULL
28 
29 /* Bitfields in NIX_TLX_PIR register */
30 #define TLX_RATE_MANTISSA		GENMASK_ULL(8, 1)
31 #define TLX_RATE_EXPONENT		GENMASK_ULL(12, 9)
32 #define TLX_RATE_DIVIDER_EXPONENT	GENMASK_ULL(16, 13)
33 #define TLX_BURST_MANTISSA		GENMASK_ULL(36, 29)
34 #define TLX_BURST_EXPONENT		GENMASK_ULL(40, 37)
35 
/* Cached hardware counters for one offloaded flow; used to compute the
 * delta reported to TC on each FLOW_CLS_STATS query.
 */
struct otx2_tc_flow_stats {
	u64 bytes;	/* byte count (not populated by current stats path) */
	u64 pkts;	/* last packet count read from the MCAM entry counter */
	u64 used;	/* last-used timestamp (not populated by current stats path) */
};
41 
/* Per-flow state for an offloaded TC flower rule, keyed by the TC cookie
 * and kept in nic->tc_info.flow_table (see tc_flow_ht_params).
 */
struct otx2_tc_flow {
	struct rhash_head		node;		/* hashtable linkage */
	unsigned long			cookie;		/* TC flow cookie (hash key) */
	unsigned int			bitpos;		/* slot in tc_entries_bitmap */
	struct rcu_head			rcu;		/* for kfree_rcu() on deletion */
	struct otx2_tc_flow_stats	stats;		/* cached hw counters */
	spinlock_t			lock; /* lock for stats */
	u16				rq;		/* RQ mapped to the policer (police action only) */
	u16				entry;		/* MCAM entry index installed for this flow */
	u16				leaf_profile;	/* CN10K leaf bandwidth profile (police action only) */
	bool				is_act_police;	/* true when a policer was set up for this flow */
};
54 
/* Compute the BURST_EXPONENT/BURST_MANTISSA register fields for a
 * requested burst size in bytes.
 *
 * Hardware decodes the burst as
 *   ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
 * so the exponent comes from the position of the highest set bit of
 * @burst and the mantissa encodes the remainder below that power of two.
 *
 * @burst:          requested burst in bytes; clamped to MAX_BURST_SIZE.
 *                  A value of 0 selects the hardware maximum.
 * @burst_exp:      output exponent field
 * @burst_mantissa: output mantissa field
 */
static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
				      u32 *burst_mantissa)
{
	unsigned int tmp;

	/* Burst is calculated as
	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
	 * Max supported burst size is 130,816 bytes.
	 */
	burst = min_t(u32, burst, MAX_BURST_SIZE);
	if (burst) {
		/* Exponent is one less than floor(log2(burst)), floored at 0 */
		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
		/* Remainder below the largest power of two <= burst */
		tmp = burst - rounddown_pow_of_two(burst);
		if (burst < MAX_BURST_MANTISSA)
			*burst_mantissa = tmp * 2;
		else
			/* Scale the remainder down into the 8-bit mantissa */
			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
	} else {
		/* burst == 0: program the largest representable burst */
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = MAX_BURST_MANTISSA;
	}
}
77 
/* Compute the rate exponent/mantissa/divider-exponent register fields
 * for a requested egress rate in Mbps.
 *
 * @maxrate:  requested rate in Mbps; 0 selects the hardware maximum
 *            (i.e. effectively no throttling) rather than disabling.
 * @exp:      output RATE_EXPONENT field
 * @mantissa: output RATE_MANTISSA field
 * @div_exp:  output RATE_DIVIDER_EXPONENT field (always 0 here)
 */
static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	unsigned int tmp;

	/* Rate calculation by hardware
	 *
	 * PIR_ADD = ((256 + mantissa) << exp) / 256
	 * rate = (2 * PIR_ADD) / ( 1 << div_exp)
	 * The resultant rate is in Mbps.
	 */

	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
	 * Setting this to '0' will ease the calculation of
	 * exponent and mantissa.
	 */
	*div_exp = 0;

	if (maxrate) {
		/* Exponent is one less than floor(log2(maxrate)), floored at 0 */
		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
		/* Remainder below the largest power of two <= maxrate */
		tmp = maxrate - rounddown_pow_of_two(maxrate);
		if (maxrate < MAX_RATE_MANTISSA)
			*mantissa = tmp * 2;
		else
			/* Scale the remainder down into the 8-bit mantissa */
			*mantissa = tmp / (1ULL << (*exp - 7));
	} else {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
	}
}
109 
/* Program the PIR (peak information rate) register of the TL4 send
 * scheduler shared by all SQs via a mailbox request to the AF.
 *
 * @burst:   burst size in bytes (0 selects the hardware maximum)
 * @maxrate: rate limit in Mbps (0 selects the hardware maximum)
 *
 * Returns 0 on success or a negative errno.
 */
static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	/* BIT_ULL(0) enables rate limiting on this scheduler queue */
	req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
			 FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}
145 
146 static int otx2_tc_validate_flow(struct otx2_nic *nic,
147 				 struct flow_action *actions,
148 				 struct netlink_ext_ack *extack)
149 {
150 	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
151 		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
152 		return -EINVAL;
153 	}
154 
155 	if (!flow_action_has_entries(actions)) {
156 		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
157 		return -EINVAL;
158 	}
159 
160 	if (!flow_offload_has_one_action(actions)) {
161 		NL_SET_ERR_MSG_MOD(extack,
162 				   "Egress MATCHALL offload supports only 1 policing action");
163 		return -EINVAL;
164 	}
165 	return 0;
166 }
167 
168 static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
169 					   struct tc_cls_matchall_offload *cls)
170 {
171 	struct netlink_ext_ack *extack = cls->common.extack;
172 	struct flow_action *actions = &cls->rule->action;
173 	struct flow_action_entry *entry;
174 	u32 rate;
175 	int err;
176 
177 	err = otx2_tc_validate_flow(nic, actions, extack);
178 	if (err)
179 		return err;
180 
181 	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
182 		NL_SET_ERR_MSG_MOD(extack,
183 				   "Only one Egress MATCHALL ratelimiter can be offloaded");
184 		return -ENOMEM;
185 	}
186 
187 	entry = &cls->rule->action.entries[0];
188 	switch (entry->id) {
189 	case FLOW_ACTION_POLICE:
190 		if (entry->police.rate_pkt_ps) {
191 			NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
192 			return -EOPNOTSUPP;
193 		}
194 		/* Convert bytes per second to Mbps */
195 		rate = entry->police.rate_bytes_ps * 8;
196 		rate = max_t(u32, rate / 1000000, 1);
197 		err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
198 		if (err)
199 			return err;
200 		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
201 		break;
202 	default:
203 		NL_SET_ERR_MSG_MOD(extack,
204 				   "Only police action is supported with Egress MATCHALL offload");
205 		return -EOPNOTSUPP;
206 	}
207 
208 	return 0;
209 }
210 
211 static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
212 					  struct tc_cls_matchall_offload *cls)
213 {
214 	struct netlink_ext_ack *extack = cls->common.extack;
215 	int err;
216 
217 	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
218 		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
219 		return -EINVAL;
220 	}
221 
222 	err = otx2_set_matchall_egress_rate(nic, 0, 0);
223 	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
224 	return err;
225 }
226 
/* Set up a CN10K ingress policer for a FLOW_ACTION_POLICE rule.
 *
 * Picks a free receive queue, allocates a leaf bandwidth profile,
 * programs the requested rate/burst and maps the RQ to the profile;
 * the NPC rule is then steered to that RQ (NIX_RX_ACTIONOP_UCAST).
 *
 * @rate:  policer rate (bytes/sec or pkts/sec per @pps; pre-scaled by caller)
 * @burst: policer burst (bytes or packets, matching @pps)
 * @mark:  skb mark from an accompanying FLOW_ACTION_MARK (0 if none)
 * @pps:   true when @rate/@burst are packet-based rather than byte-based
 *
 * On failure the leaf profile is freed again; the RQ bitmap and the
 * node's policer fields are only updated on success.
 * Returns 0 or a negative errno.
 */
static int otx2_tc_act_set_police(struct otx2_nic *nic,
				  struct otx2_tc_flow *node,
				  struct flow_cls_offload *f,
				  u64 rate, u32 burst, u32 mark,
				  struct npc_install_flow_req *req, bool pps)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct otx2_hw *hw = &nic->hw;
	int rq_idx, rc;

	/* Each police rule needs its own RQ; fail once all are in use */
	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
	if (rq_idx >= hw->rx_queues) {
		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
	if (rc) {
		mutex_unlock(&nic->mbox.lock);
		return rc;
	}

	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
	if (rc)
		goto free_leaf;

	rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
	if (rc)
		goto free_leaf;

	mutex_unlock(&nic->mbox.lock);

	/* Steer matching packets to the policed RQ */
	req->match_id = mark & 0xFFFFULL;
	req->index = rq_idx;
	req->op = NIX_RX_ACTIONOP_UCAST;
	set_bit(rq_idx, &nic->rq_bmap);
	node->is_act_police = true;
	node->rq = rq_idx;

	return 0;

free_leaf:
	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
		netdev_err(nic->netdev,
			   "Unable to free leaf bandwidth profile(%d)\n",
			   node->leaf_profile);
	mutex_unlock(&nic->mbox.lock);
	return rc;
}
278 
279 static int otx2_tc_parse_actions(struct otx2_nic *nic,
280 				 struct flow_action *flow_action,
281 				 struct npc_install_flow_req *req,
282 				 struct flow_cls_offload *f,
283 				 struct otx2_tc_flow *node)
284 {
285 	struct netlink_ext_ack *extack = f->common.extack;
286 	struct flow_action_entry *act;
287 	struct net_device *target;
288 	struct otx2_nic *priv;
289 	u32 burst, mark = 0;
290 	u8 nr_police = 0;
291 	bool pps = false;
292 	u64 rate;
293 	int i;
294 
295 	if (!flow_action_has_entries(flow_action)) {
296 		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
297 		return -EINVAL;
298 	}
299 
300 	flow_action_for_each(i, act, flow_action) {
301 		switch (act->id) {
302 		case FLOW_ACTION_DROP:
303 			req->op = NIX_RX_ACTIONOP_DROP;
304 			return 0;
305 		case FLOW_ACTION_ACCEPT:
306 			req->op = NIX_RX_ACTION_DEFAULT;
307 			return 0;
308 		case FLOW_ACTION_REDIRECT_INGRESS:
309 			target = act->dev;
310 			priv = netdev_priv(target);
311 			/* npc_install_flow_req doesn't support passing a target pcifunc */
312 			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
313 				NL_SET_ERR_MSG_MOD(extack,
314 						   "can't redirect to other pf/vf");
315 				return -EOPNOTSUPP;
316 			}
317 			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
318 			req->op = NIX_RX_ACTION_DEFAULT;
319 			return 0;
320 		case FLOW_ACTION_VLAN_POP:
321 			req->vtag0_valid = true;
322 			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
323 			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
324 			break;
325 		case FLOW_ACTION_POLICE:
326 			/* Ingress ratelimiting is not supported on OcteonTx2 */
327 			if (is_dev_otx2(nic->pdev)) {
328 				NL_SET_ERR_MSG_MOD(extack,
329 					"Ingress policing not supported on this platform");
330 				return -EOPNOTSUPP;
331 			}
332 
333 			if (act->police.rate_bytes_ps > 0) {
334 				rate = act->police.rate_bytes_ps * 8;
335 				burst = act->police.burst;
336 			} else if (act->police.rate_pkt_ps > 0) {
337 				/* The algorithm used to calculate rate
338 				 * mantissa, exponent values for a given token
339 				 * rate (token can be byte or packet) requires
340 				 * token rate to be mutiplied by 8.
341 				 */
342 				rate = act->police.rate_pkt_ps * 8;
343 				burst = act->police.burst_pkt;
344 				pps = true;
345 			}
346 			nr_police++;
347 			break;
348 		case FLOW_ACTION_MARK:
349 			mark = act->mark;
350 			break;
351 		default:
352 			return -EOPNOTSUPP;
353 		}
354 	}
355 
356 	if (nr_police > 1) {
357 		NL_SET_ERR_MSG_MOD(extack,
358 				   "rate limit police offload requires a single action");
359 		return -EOPNOTSUPP;
360 	}
361 
362 	if (nr_police)
363 		return otx2_tc_act_set_police(nic, node, f, rate, burst,
364 					      mark, req, pps);
365 
366 	return 0;
367 }
368 
369 static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
370 				struct flow_cls_offload *f,
371 				struct npc_install_flow_req *req)
372 {
373 	struct netlink_ext_ack *extack = f->common.extack;
374 	struct flow_msg *flow_spec = &req->packet;
375 	struct flow_msg *flow_mask = &req->mask;
376 	struct flow_dissector *dissector;
377 	struct flow_rule *rule;
378 	u8 ip_proto = 0;
379 
380 	rule = flow_cls_offload_flow_rule(f);
381 	dissector = rule->match.dissector;
382 
383 	if ((dissector->used_keys &
384 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
385 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
386 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
387 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
388 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
389 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
390 	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
391 	      BIT(FLOW_DISSECTOR_KEY_IP))))  {
392 		netdev_info(nic->netdev, "unsupported flow used key 0x%x",
393 			    dissector->used_keys);
394 		return -EOPNOTSUPP;
395 	}
396 
397 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
398 		struct flow_match_basic match;
399 
400 		flow_rule_match_basic(rule, &match);
401 
402 		/* All EtherTypes can be matched, no hw limitation */
403 		flow_spec->etype = match.key->n_proto;
404 		flow_mask->etype = match.mask->n_proto;
405 		req->features |= BIT_ULL(NPC_ETYPE);
406 
407 		if (match.mask->ip_proto &&
408 		    (match.key->ip_proto != IPPROTO_TCP &&
409 		     match.key->ip_proto != IPPROTO_UDP &&
410 		     match.key->ip_proto != IPPROTO_SCTP &&
411 		     match.key->ip_proto != IPPROTO_ICMP &&
412 		     match.key->ip_proto != IPPROTO_ICMPV6)) {
413 			netdev_info(nic->netdev,
414 				    "ip_proto=0x%x not supported\n",
415 				    match.key->ip_proto);
416 			return -EOPNOTSUPP;
417 		}
418 		if (match.mask->ip_proto)
419 			ip_proto = match.key->ip_proto;
420 
421 		if (ip_proto == IPPROTO_UDP)
422 			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
423 		else if (ip_proto == IPPROTO_TCP)
424 			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
425 		else if (ip_proto == IPPROTO_SCTP)
426 			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
427 		else if (ip_proto == IPPROTO_ICMP)
428 			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
429 		else if (ip_proto == IPPROTO_ICMPV6)
430 			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
431 	}
432 
433 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
434 		struct flow_match_eth_addrs match;
435 
436 		flow_rule_match_eth_addrs(rule, &match);
437 		if (!is_zero_ether_addr(match.mask->src)) {
438 			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
439 			return -EOPNOTSUPP;
440 		}
441 
442 		if (!is_zero_ether_addr(match.mask->dst)) {
443 			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
444 			ether_addr_copy(flow_mask->dmac,
445 					(u8 *)&match.mask->dst);
446 			req->features |= BIT_ULL(NPC_DMAC);
447 		}
448 	}
449 
450 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
451 		struct flow_match_ip match;
452 
453 		flow_rule_match_ip(rule, &match);
454 		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
455 		    match.mask->tos) {
456 			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
457 			return -EOPNOTSUPP;
458 		}
459 		if (match.mask->ttl) {
460 			NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
461 			return -EOPNOTSUPP;
462 		}
463 		flow_spec->tos = match.key->tos;
464 		flow_mask->tos = match.mask->tos;
465 		req->features |= BIT_ULL(NPC_TOS);
466 	}
467 
468 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
469 		struct flow_match_vlan match;
470 		u16 vlan_tci, vlan_tci_mask;
471 
472 		flow_rule_match_vlan(rule, &match);
473 
474 		if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
475 			netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
476 				   ntohs(match.key->vlan_tpid));
477 			return -EOPNOTSUPP;
478 		}
479 
480 		if (match.mask->vlan_id ||
481 		    match.mask->vlan_dei ||
482 		    match.mask->vlan_priority) {
483 			vlan_tci = match.key->vlan_id |
484 				   match.key->vlan_dei << 12 |
485 				   match.key->vlan_priority << 13;
486 
487 			vlan_tci_mask = match.mask->vlan_id |
488 					match.key->vlan_dei << 12 |
489 					match.key->vlan_priority << 13;
490 
491 			flow_spec->vlan_tci = htons(vlan_tci);
492 			flow_mask->vlan_tci = htons(vlan_tci_mask);
493 			req->features |= BIT_ULL(NPC_OUTER_VID);
494 		}
495 	}
496 
497 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
498 		struct flow_match_ipv4_addrs match;
499 
500 		flow_rule_match_ipv4_addrs(rule, &match);
501 
502 		flow_spec->ip4dst = match.key->dst;
503 		flow_mask->ip4dst = match.mask->dst;
504 		req->features |= BIT_ULL(NPC_DIP_IPV4);
505 
506 		flow_spec->ip4src = match.key->src;
507 		flow_mask->ip4src = match.mask->src;
508 		req->features |= BIT_ULL(NPC_SIP_IPV4);
509 	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
510 		struct flow_match_ipv6_addrs match;
511 
512 		flow_rule_match_ipv6_addrs(rule, &match);
513 
514 		if (ipv6_addr_loopback(&match.key->dst) ||
515 		    ipv6_addr_loopback(&match.key->src)) {
516 			NL_SET_ERR_MSG_MOD(extack,
517 					   "Flow matching IPv6 loopback addr not supported");
518 			return -EOPNOTSUPP;
519 		}
520 
521 		if (!ipv6_addr_any(&match.mask->dst)) {
522 			memcpy(&flow_spec->ip6dst,
523 			       (struct in6_addr *)&match.key->dst,
524 			       sizeof(flow_spec->ip6dst));
525 			memcpy(&flow_mask->ip6dst,
526 			       (struct in6_addr *)&match.mask->dst,
527 			       sizeof(flow_spec->ip6dst));
528 			req->features |= BIT_ULL(NPC_DIP_IPV6);
529 		}
530 
531 		if (!ipv6_addr_any(&match.mask->src)) {
532 			memcpy(&flow_spec->ip6src,
533 			       (struct in6_addr *)&match.key->src,
534 			       sizeof(flow_spec->ip6src));
535 			memcpy(&flow_mask->ip6src,
536 			       (struct in6_addr *)&match.mask->src,
537 			       sizeof(flow_spec->ip6src));
538 			req->features |= BIT_ULL(NPC_SIP_IPV6);
539 		}
540 	}
541 
542 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
543 		struct flow_match_ports match;
544 
545 		flow_rule_match_ports(rule, &match);
546 
547 		flow_spec->dport = match.key->dst;
548 		flow_mask->dport = match.mask->dst;
549 		if (ip_proto == IPPROTO_UDP)
550 			req->features |= BIT_ULL(NPC_DPORT_UDP);
551 		else if (ip_proto == IPPROTO_TCP)
552 			req->features |= BIT_ULL(NPC_DPORT_TCP);
553 		else if (ip_proto == IPPROTO_SCTP)
554 			req->features |= BIT_ULL(NPC_DPORT_SCTP);
555 
556 		flow_spec->sport = match.key->src;
557 		flow_mask->sport = match.mask->src;
558 		if (ip_proto == IPPROTO_UDP)
559 			req->features |= BIT_ULL(NPC_SPORT_UDP);
560 		else if (ip_proto == IPPROTO_TCP)
561 			req->features |= BIT_ULL(NPC_SPORT_TCP);
562 		else if (ip_proto == IPPROTO_SCTP)
563 			req->features |= BIT_ULL(NPC_SPORT_SCTP);
564 	}
565 
566 	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
567 }
568 
569 static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
570 {
571 	struct npc_delete_flow_req *req;
572 	int err;
573 
574 	mutex_lock(&nic->mbox.lock);
575 	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
576 	if (!req) {
577 		mutex_unlock(&nic->mbox.lock);
578 		return -ENOMEM;
579 	}
580 
581 	req->entry = entry;
582 
583 	/* Send message to AF */
584 	err = otx2_sync_mbox_msg(&nic->mbox);
585 	if (err) {
586 		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
587 			   entry);
588 		mutex_unlock(&nic->mbox.lock);
589 		return -EFAULT;
590 	}
591 	mutex_unlock(&nic->mbox.lock);
592 
593 	return 0;
594 }
595 
596 static int otx2_tc_del_flow(struct otx2_nic *nic,
597 			    struct flow_cls_offload *tc_flow_cmd)
598 {
599 	struct otx2_tc_info *tc_info = &nic->tc_info;
600 	struct otx2_tc_flow *flow_node;
601 	int err;
602 
603 	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
604 					   &tc_flow_cmd->cookie,
605 					   tc_info->flow_ht_params);
606 	if (!flow_node) {
607 		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
608 			   tc_flow_cmd->cookie);
609 		return -EINVAL;
610 	}
611 
612 	if (flow_node->is_act_police) {
613 		mutex_lock(&nic->mbox.lock);
614 
615 		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
616 						 flow_node->leaf_profile, false);
617 		if (err)
618 			netdev_err(nic->netdev,
619 				   "Unmapping RQ %d & profile %d failed\n",
620 				   flow_node->rq, flow_node->leaf_profile);
621 
622 		err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
623 		if (err)
624 			netdev_err(nic->netdev,
625 				   "Unable to free leaf bandwidth profile(%d)\n",
626 				   flow_node->leaf_profile);
627 
628 		__clear_bit(flow_node->rq, &nic->rq_bmap);
629 
630 		mutex_unlock(&nic->mbox.lock);
631 	}
632 
633 	otx2_del_mcam_flow_entry(nic, flow_node->entry);
634 
635 	WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
636 				       &flow_node->node,
637 				       nic->tc_info.flow_ht_params));
638 	kfree_rcu(flow_node, rcu);
639 
640 	clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
641 	tc_info->num_entries--;
642 
643 	return 0;
644 }
645 
646 static int otx2_tc_add_flow(struct otx2_nic *nic,
647 			    struct flow_cls_offload *tc_flow_cmd)
648 {
649 	struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
650 	struct otx2_tc_info *tc_info = &nic->tc_info;
651 	struct otx2_tc_flow *new_node, *old_node;
652 	struct npc_install_flow_req *req, dummy;
653 	int rc, err;
654 
655 	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
656 		return -ENOMEM;
657 
658 	if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
659 		NL_SET_ERR_MSG_MOD(extack,
660 				   "Not enough MCAM space to add the flow");
661 		return -ENOMEM;
662 	}
663 
664 	/* allocate memory for the new flow and it's node */
665 	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
666 	if (!new_node)
667 		return -ENOMEM;
668 	spin_lock_init(&new_node->lock);
669 	new_node->cookie = tc_flow_cmd->cookie;
670 
671 	memset(&dummy, 0, sizeof(struct npc_install_flow_req));
672 
673 	rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
674 	if (rc) {
675 		kfree_rcu(new_node, rcu);
676 		return rc;
677 	}
678 
679 	/* If a flow exists with the same cookie, delete it */
680 	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
681 					  &tc_flow_cmd->cookie,
682 					  tc_info->flow_ht_params);
683 	if (old_node)
684 		otx2_tc_del_flow(nic, tc_flow_cmd);
685 
686 	mutex_lock(&nic->mbox.lock);
687 	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
688 	if (!req) {
689 		mutex_unlock(&nic->mbox.lock);
690 		rc = -ENOMEM;
691 		goto free_leaf;
692 	}
693 
694 	memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
695 	memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
696 
697 	new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
698 					       nic->flow_cfg->tc_max_flows);
699 	req->channel = nic->hw.rx_chan_base;
700 	req->entry = nic->flow_cfg->flow_ent[nic->flow_cfg->tc_flower_offset +
701 				nic->flow_cfg->tc_max_flows - new_node->bitpos];
702 	req->intf = NIX_INTF_RX;
703 	req->set_cntr = 1;
704 	new_node->entry = req->entry;
705 
706 	/* Send message to AF */
707 	rc = otx2_sync_mbox_msg(&nic->mbox);
708 	if (rc) {
709 		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
710 		mutex_unlock(&nic->mbox.lock);
711 		kfree_rcu(new_node, rcu);
712 		goto free_leaf;
713 	}
714 	mutex_unlock(&nic->mbox.lock);
715 
716 	/* add new flow to flow-table */
717 	rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
718 				    nic->tc_info.flow_ht_params);
719 	if (rc) {
720 		otx2_del_mcam_flow_entry(nic, req->entry);
721 		kfree_rcu(new_node, rcu);
722 		goto free_leaf;
723 	}
724 
725 	set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
726 	tc_info->num_entries++;
727 
728 	return 0;
729 
730 free_leaf:
731 	if (new_node->is_act_police) {
732 		mutex_lock(&nic->mbox.lock);
733 
734 		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
735 						 new_node->leaf_profile, false);
736 		if (err)
737 			netdev_err(nic->netdev,
738 				   "Unmapping RQ %d & profile %d failed\n",
739 				   new_node->rq, new_node->leaf_profile);
740 		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
741 		if (err)
742 			netdev_err(nic->netdev,
743 				   "Unable to free leaf bandwidth profile(%d)\n",
744 				   new_node->leaf_profile);
745 
746 		__clear_bit(new_node->rq, &nic->rq_bmap);
747 
748 		mutex_unlock(&nic->mbox.lock);
749 	}
750 
751 	return rc;
752 }
753 
/* Report hardware hit counters for a flower rule (FLOW_CLS_STATS).
 *
 * Queries the MCAM entry's packet counter from the AF and reports the
 * delta since the previous query via flow_stats_update(). Byte and
 * drop counts are not available from this counter and are passed as 0.
 */
static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
				  struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct npc_mcam_get_stats_req *req;
	struct npc_mcam_get_stats_rsp *rsp;
	struct otx2_tc_flow_stats *stats;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(nic->netdev, "tc flow not found for cookie %lx",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_node->entry;

	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
		(&nic->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&nic->mbox.lock);
		return PTR_ERR(rsp);
	}

	mutex_unlock(&nic->mbox.lock);

	/* Counter not enabled for this entry - nothing to report */
	if (!rsp->stat_ena)
		return -EINVAL;

	stats = &flow_node->stats;

	/* Report only the packets seen since the previous query */
	spin_lock(&flow_node->lock);
	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	stats->pkts = rsp->stat;
	spin_unlock(&flow_node->lock);

	return 0;
}
813 
814 static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
815 				    struct flow_cls_offload *cls_flower)
816 {
817 	switch (cls_flower->command) {
818 	case FLOW_CLS_REPLACE:
819 		return otx2_tc_add_flow(nic, cls_flower);
820 	case FLOW_CLS_DESTROY:
821 		return otx2_tc_del_flow(nic, cls_flower);
822 	case FLOW_CLS_STATS:
823 		return otx2_tc_get_flow_stats(nic, cls_flower);
824 	default:
825 		return -EOPNOTSUPP;
826 	}
827 }
828 
829 static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
830 					    struct tc_cls_matchall_offload *cls)
831 {
832 	struct netlink_ext_ack *extack = cls->common.extack;
833 	struct flow_action *actions = &cls->rule->action;
834 	struct flow_action_entry *entry;
835 	u64 rate;
836 	int err;
837 
838 	err = otx2_tc_validate_flow(nic, actions, extack);
839 	if (err)
840 		return err;
841 
842 	if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
843 		NL_SET_ERR_MSG_MOD(extack,
844 				   "Only one ingress MATCHALL ratelimitter can be offloaded");
845 		return -ENOMEM;
846 	}
847 
848 	entry = &cls->rule->action.entries[0];
849 	switch (entry->id) {
850 	case FLOW_ACTION_POLICE:
851 		/* Ingress ratelimiting is not supported on OcteonTx2 */
852 		if (is_dev_otx2(nic->pdev)) {
853 			NL_SET_ERR_MSG_MOD(extack,
854 					   "Ingress policing not supported on this platform");
855 			return -EOPNOTSUPP;
856 		}
857 
858 		err = cn10k_alloc_matchall_ipolicer(nic);
859 		if (err)
860 			return err;
861 
862 		/* Convert to bits per second */
863 		rate = entry->police.rate_bytes_ps * 8;
864 		err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
865 		if (err)
866 			return err;
867 		nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
868 		break;
869 	default:
870 		NL_SET_ERR_MSG_MOD(extack,
871 				   "Only police action supported with Ingress MATCHALL offload");
872 		return -EOPNOTSUPP;
873 	}
874 
875 	return 0;
876 }
877 
878 static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
879 					   struct tc_cls_matchall_offload *cls)
880 {
881 	struct netlink_ext_ack *extack = cls->common.extack;
882 	int err;
883 
884 	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
885 		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
886 		return -EINVAL;
887 	}
888 
889 	err = cn10k_free_matchall_ipolicer(nic);
890 	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
891 	return err;
892 }
893 
894 static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
895 					  struct tc_cls_matchall_offload *cls_matchall)
896 {
897 	switch (cls_matchall->command) {
898 	case TC_CLSMATCHALL_REPLACE:
899 		return otx2_tc_ingress_matchall_install(nic, cls_matchall);
900 	case TC_CLSMATCHALL_DESTROY:
901 		return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
902 	case TC_CLSMATCHALL_STATS:
903 	default:
904 		break;
905 	}
906 
907 	return -EOPNOTSUPP;
908 }
909 
910 static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
911 					  void *type_data, void *cb_priv)
912 {
913 	struct otx2_nic *nic = cb_priv;
914 
915 	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
916 		return -EOPNOTSUPP;
917 
918 	switch (type) {
919 	case TC_SETUP_CLSFLOWER:
920 		return otx2_setup_tc_cls_flower(nic, type_data);
921 	case TC_SETUP_CLSMATCHALL:
922 		return otx2_setup_tc_ingress_matchall(nic, type_data);
923 	default:
924 		break;
925 	}
926 
927 	return -EOPNOTSUPP;
928 }
929 
930 static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
931 					 struct tc_cls_matchall_offload *cls_matchall)
932 {
933 	switch (cls_matchall->command) {
934 	case TC_CLSMATCHALL_REPLACE:
935 		return otx2_tc_egress_matchall_install(nic, cls_matchall);
936 	case TC_CLSMATCHALL_DESTROY:
937 		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
938 	case TC_CLSMATCHALL_STATS:
939 	default:
940 		break;
941 	}
942 
943 	return -EOPNOTSUPP;
944 }
945 
946 static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
947 					 void *type_data, void *cb_priv)
948 {
949 	struct otx2_nic *nic = cb_priv;
950 
951 	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
952 		return -EOPNOTSUPP;
953 
954 	switch (type) {
955 	case TC_SETUP_CLSMATCHALL:
956 		return otx2_setup_tc_egress_matchall(nic, type_data);
957 	default:
958 		break;
959 	}
960 
961 	return -EOPNOTSUPP;
962 }
963 
964 static LIST_HEAD(otx2_block_cb_list);
965 
966 static int otx2_setup_tc_block(struct net_device *netdev,
967 			       struct flow_block_offload *f)
968 {
969 	struct otx2_nic *nic = netdev_priv(netdev);
970 	flow_setup_cb_t *cb;
971 	bool ingress;
972 
973 	if (f->block_shared)
974 		return -EOPNOTSUPP;
975 
976 	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
977 		cb = otx2_setup_tc_block_ingress_cb;
978 		ingress = true;
979 	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
980 		cb = otx2_setup_tc_block_egress_cb;
981 		ingress = false;
982 	} else {
983 		return -EOPNOTSUPP;
984 	}
985 
986 	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
987 					  nic, nic, ingress);
988 }
989 
990 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
991 		  void *type_data)
992 {
993 	switch (type) {
994 	case TC_SETUP_BLOCK:
995 		return otx2_setup_tc_block(netdev, type_data);
996 	default:
997 		return -EOPNOTSUPP;
998 	}
999 }
1000 
/* Hashtable parameters for the flow table: nodes are struct
 * otx2_tc_flow, keyed by the TC flow cookie.
 */
static const struct rhashtable_params tc_flow_ht_params = {
	.head_offset = offsetof(struct otx2_tc_flow, node),
	.key_offset = offsetof(struct otx2_tc_flow, cookie),
	.key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
1007 
/* Initialize TC offload state: reserve RQ 0 (never handed to a police
 * action) and set up the cookie-keyed flow hashtable.
 * Returns the rhashtable_init() result (0 or negative errno).
 */
int otx2_init_tc(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;

	/* Exclude receive queue 0 being used for police action */
	set_bit(0, &nic->rq_bmap);

	tc->flow_ht_params = tc_flow_ht_params;
	return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
}
1018 
1019 void otx2_shutdown_tc(struct otx2_nic *nic)
1020 {
1021 	struct otx2_tc_info *tc = &nic->tc_info;
1022 
1023 	rhashtable_destroy(&tc->flow_table);
1024 }
1025