// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <net/ipv6.h>
#include <linux/sort.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

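/* Software state for one installed flow rule (ethtool ntuple filter or
 * CGX/RPM DMAC filter).
 */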
struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u16 entry;
	bool is_vf;
	u8 rss_ctx_id;
	int vf;
	bool dmac_filter;
};

enum dmac_req {
	DMAC_ADDR_UPDATE,
	DMAC_ADDR_DEL
};

static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
{
	devm_kfree(pfvf->dev, flow_cfg->flow_ent);
	flow_cfg->flow_ent = NULL;
	flow_cfg->max_flows = 0;
}

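/* Return all ntuple MCAM entries owned by this PF/VF back to the AF via
 * the mailbox, then reset the ntuple bookkeeping in flow_cfg.
 */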
static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	int ent, err;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	for (ent = 0; ent < flow_cfg->max_flows; ent++) {
		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
		if (!req)
			break;

		req->entry = flow_cfg->flow_ent[ent];

		/* Send message to AF to free MCAM entries */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			break;
	}
	mutex_unlock(&pfvf->mbox.lock);
	otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
	return 0;
}

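/* Comparator for sort(): orders MCAM entry indices ascending */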
static int mcam_entry_cmp(const void *a, const void *b)
{
	return *(const u16 *)a - *(const u16 *)b;
}

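/* Allocate @count ntuple MCAM entries from the AF. Entries are requested
 * in chunks of NPC_MAX_NONCONTIG_ENTRIES; for a PF they are requested at a
 * higher priority than its first default entry so that VFs' entries take
 * precedence. The resulting list is sorted so that ntuple filter locations
 * map to MCAM entries in order.
 */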
static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int ent, allocated = 0;

	/* Free current ones and allocate new ones with requested count */
	otx2_free_ntuple_mcam_entries(pfvf);

	if (!count)
		return 0;

	flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
						sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->flow_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	/* Only a maximum of NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
	 * allocated in a single request.
	 */
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
		if (!req)
			goto exit;

		req->contig = false;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		/* Allocate higher-priority entries for PFs, so that VFs'
		 * entries will sit above the PF's entries.
		 */
		if (!is_otx2_vf(pfvf->pcifunc)) {
			req->priority = NPC_MCAM_HIGHER_PRIO;
			req->ref_entry = flow_cfg->def_ent[0];
		}

		/* Send message to AF */
		if (otx2_sync_mbox_msg(&pfvf->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp))
			goto exit;

		for (ent = 0; ent < rsp->count; ent++)
			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		/* If this request is not fulfilled, no need to send
		 * further requests.
		 */
		if (rsp->count != req->count)
			break;
	}

	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in ascending order,
	 * otherwise the user-installed ntuple filter index and the MCAM entry
	 * index will not be in sync.
	 */
	if (allocated)
		sort(&flow_cfg->flow_ent[0], allocated,
		     sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

exit:
	mutex_unlock(&pfvf->mbox.lock);

	flow_cfg->max_flows = allocated;

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;

	if (allocated != count)
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got only %d\n",
			    count, allocated);
	return allocated;
}

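/* Allocate the default MCAM entries (VF VLAN, unicast and RX VLAN) in a
 * single request, record their offsets in flow_cfg, then allocate the
 * ntuple filter pool.
 */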
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int ent, count;

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	count = OTX2_MAX_UNICAST_FLOWS +
			OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;

	flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
					       sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->def_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->contig = false;
	req->count = count;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(rsp);
	}

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
		mutex_unlock(&pfvf->mbox.lock);
		devm_kfree(pfvf->dev, flow_cfg->def_ent);
		return 0;
	}

	for (ent = 0; ent < rsp->count; ent++)
		flow_cfg->def_ent[ent] = rsp->entry_list[ent];

	flow_cfg->vf_vlan_offset = 0;
	flow_cfg->unicast_offset = vf_vlan_max_flows;
	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
					OTX2_MAX_UNICAST_FLOWS;
	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
	pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
	pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	/* Allocate entries for ntuple filters */
	count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0) {
		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
		return 0;
	}

	pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;

	return 0;
}

int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg;
	int count;

	pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
				      sizeof(struct otx2_flow_config),
				      GFP_KERNEL);
	if (!pfvf->flow_cfg)
		return -ENOMEM;

	flow_cfg = pfvf->flow_cfg;
	INIT_LIST_HEAD(&flow_cfg->flow_list);
	flow_cfg->max_flows = 0;

	count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(otx2vf_mcam_flow_init);

int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

	err = otx2_alloc_mcam_entries(pf);
	if (err)
		return err;

	/* Check whether MCAM entries were allocated */
	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return 0;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	otx2_dmacflt_get_max_cnt(pf);

	/* No DMAC filters are available */
	if (!pf->flow_cfg->dmacflt_max_flows)
		return 0;

	pf->flow_cfg->bmap_to_dmacindex =
			devm_kzalloc(pf->dev, sizeof(u8) *
				     pf->flow_cfg->dmacflt_max_flows,
				     GFP_KERNEL);

	if (!pf->flow_cfg->bmap_to_dmacindex)
		return -ENOMEM;

	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}
EXPORT_SYMBOL(otx2_mcam_flow_del);

/* On success adds an MCAM entry.
 * On failure promiscuous mode is enabled.
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* No free MCAM entries, or the UC list exceeds the allotted count */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* Unicast offset starts at 32; entries 0..31 are used for ntuple */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->def_ent[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
			  pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(netdev,
			    "Add %pM to CGX/RPM DMAC filters list as well\n",
			    mac);

	return otx2_do_add_macfilter(pf, mac);
}

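/* Look up the MCAM entry installed for @mac and release its mac_table slot.
 * Returns true and stores the entry index in @mcam_entry on a match.
 */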
static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* Check whether an MCAM entry exists for the given MAC */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

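/* Insert @flow into flow_list, keeping the list sorted by location */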
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
	if (!flow_cfg)
		return 0;

	if (flow_cfg->nr_flows == flow_cfg->max_flows ||
	    bitmap_weight(&flow_cfg->dmacflt_bmap,
			  flow_cfg->dmacflt_max_flows))
		return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
	else
		return flow_cfg->max_flows;
}
EXPORT_SYMBOL(otx2_get_maxflows);

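/* ethtool ETHTOOL_GRXCLSRULE handler: copy the rule at @location into @nfc */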
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}

static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

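/* Translate an ethtool flow spec into an NPC install request. Bits not set
 * in a mask are don't-cares; fields the NPC profile cannot match yield
 * -EOPNOTSUPP.
 */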
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_hdr->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		if (fsp->m_ext.vlan_etype)
			return -EINVAL;
		if (fsp->m_ext.vlan_tci) {
			if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
				return -EINVAL;
			if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;

			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		/* Not Drop/Direct to queue but use action in default entry */
		if (fsp->m_ext.data[1] &&
		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
			req->op = NIX_RX_ACTION_DEFAULT;
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
					struct ethtool_rx_flow_spec *fsp)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	u64 ring_cookie = fsp->ring_cookie;
	u32 flow_type;

	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
		return false;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);

	/* CGX/RPM block DMAC filtering is configured for allow-listing;
	 * check for an action other than DROP.
	 */
	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
		if (is_zero_ether_addr(eth_mask->h_dest) &&
		    is_valid_ether_addr(eth_hdr->h_dest))
			return true;
	}

	return false;
}

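/* Build and send an NPC install request for @flow: resolve the RX action
 * (drop, RSS group or unicast queue) and the target VF from ring_cookie.
 */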
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* Free the message allocated above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* Change to unicast only if the default entry's action was
		 * not requested by the user.
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}
	}

	/* In the ethtool ring_cookie, a VF is encoded as (VF index + 1) */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

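/* Reserve DMAC filter slot 0 for the PF's own MAC address so that the
 * interface keeps receiving on its MAC while user DMAC filters are
 * installed.
 */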
static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
				    struct otx2_flow *flow)
{
	struct otx2_flow *pf_mac;
	struct ethhdr *eth_hdr;

	pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
	if (!pf_mac)
		return -ENOMEM;

	pf_mac->entry = 0;
	pf_mac->dmac_filter = true;
	pf_mac->location = pfvf->flow_cfg->max_flows;
	memcpy(&pf_mac->flow_spec, &flow->flow_spec,
	       sizeof(struct ethtool_rx_flow_spec));
	pf_mac->flow_spec.location = pf_mac->location;

	/* Copy PF mac address */
	eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
	ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);

	/* Install DMAC filter with PF mac address */
	otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);

	otx2_add_flow_to_list(pfvf, pf_mac);
	pfvf->flow_cfg->nr_flows++;
	set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);

	return 0;
}

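/* ethtool ETHTOOL_SRXCLSRLINS handler: validate the rule, then install it
 * either as a CGX/RPM DMAC filter or as an NPC MCAM flow.
 */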
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	struct ethhdr *eth_hdr;
	bool new = false;
	int err = 0;
	u32 ring;

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
		eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* Sync dmac filter table with updated fields */
		if (flow->dmac_filter)
			return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
						   flow->entry);

		if (bitmap_full(&flow_cfg->dmacflt_bmap,
				flow_cfg->dmacflt_max_flows)) {
			netdev_warn(pfvf->netdev,
				    "Can't insert the rule %d as max allowed dmac filters are %d\n",
				    flow->location +
				    flow_cfg->dmacflt_max_flows,
				    flow_cfg->dmacflt_max_flows);
			err = -EINVAL;
			if (new)
				kfree(flow);
			return err;
		}

		/* Install PF mac address to DMAC filter list */
		if (!test_bit(0, &flow_cfg->dmacflt_bmap))
			otx2_add_flow_with_pfmac(pfvf, flow);

		flow->dmac_filter = true;
		flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
						  flow_cfg->dmacflt_max_flows);
		fsp->location = flow_cfg->max_flows + flow->entry;
		flow->flow_spec.location = fsp->location;
		flow->location = fsp->location;

		set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);

	} else {
		if (flow->location >= pfvf->flow_cfg->max_flows) {
			netdev_warn(pfvf->netdev,
				    "Can't insert non dmac ntuple rule at %d, allowed range 0-%d\n",
				    flow->location,
				    flow_cfg->max_flows - 1);
			err = -EINVAL;
		} else {
			flow->entry = flow_cfg->flow_ent[flow->location];
			err = otx2_add_flow_msg(pfvf, flow);
		}
	}

	if (err) {
		if (err == MBOX_MSG_INVALID)
			err = -EINVAL;
		if (new)
			kfree(flow);
		return err;
	}

	/* Add the newly installed flow to the list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

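/* Delete or refresh the DMAC filter holding the PF MAC (entry 0): on
 * DMAC_ADDR_DEL the filter is removed and its flow freed, otherwise the
 * filter is updated with the current interface MAC address.
 */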
static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;
	bool found = false;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter && iter->entry == 0) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			if (req == DMAC_ADDR_DEL) {
				otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
						    0);
				clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
				found = true;
			} else {
				ether_addr_copy(eth_hdr->h_dest,
						pfvf->netdev->dev_addr);
				otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
			}
			break;
		}
	}

	if (found) {
		list_del(&iter->list);
		kfree(iter);
		pfvf->flow_cfg->nr_flows--;
	}
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	if (flow->dmac_filter) {
		struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* The user is not allowed to remove the DMAC filter that
		 * carries the interface MAC.
		 */
		if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
			return -EPERM;

		err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
					  flow->entry);
		clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		/* If all user-installed DMAC filters are removed, delete the
		 * filter with the interface MAC address and configure the
		 * CGX/RPM block in promiscuous mode.
		 */
		if (bitmap_weight(&flow_cfg->dmacflt_bmap,
				  flow_cfg->dmacflt_max_flows) == 1)
			otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
	} else {
		err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	}

	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d\n",
				    flow->location, err);
	}
}

int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->flow_ent[0];
	req->end   = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

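/* Remove every installed flow, free the flow list, then return all MCAM
 * entries owned by this PF/VF back to the AF.
 */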
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* Remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

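/* Enable or disable RX VLAN offload: install/remove the RX VLAN MCAM entry
 * and configure VTAG stripping and capture via the NIX AF.
 */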
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough MCAM entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* Configure VTAG stripping, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}

void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;

	list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			otx2_dmacflt_add(pf, eth_hdr->h_dest,
					 iter->entry);
		}
	}
}

void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
{
	otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
}