1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Physical Function ethernet driver
3  *
4  * Copyright (C) 2020 Marvell.
5  */
6 
7 #include <net/ipv6.h>
8 
9 #include "otx2_common.h"
10 
11 #define OTX2_DEFAULT_ACTION	0x1
12 
13 struct otx2_flow {
14 	struct ethtool_rx_flow_spec flow_spec;
15 	struct list_head list;
16 	u32 location;
17 	u16 entry;
18 	bool is_vf;
19 	u8 rss_ctx_id;
20 	int vf;
21 	bool dmac_filter;
22 };
23 
24 enum dmac_req {
25 	DMAC_ADDR_UPDATE,
26 	DMAC_ADDR_DEL
27 };
28 
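/* Free the MCAM entry list reserved for ntuple filters and reset the
 * related ntuple/tc flow limits.
 */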
29 static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
30 {
31 	devm_kfree(pfvf->dev, flow_cfg->flow_ent);
32 	flow_cfg->flow_ent = NULL;
33 	flow_cfg->ntuple_max_flows = 0;
34 	flow_cfg->tc_max_flows = 0;
35 }
36 
37 static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
38 {
39 	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
40 	struct npc_mcam_free_entry_req *req;
41 	int ent, err;
42 
43 	if (!flow_cfg->ntuple_max_flows)
44 		return 0;
45 
46 	mutex_lock(&pfvf->mbox.lock);
47 	for (ent = 0; ent < flow_cfg->ntuple_max_flows; ent++) {
48 		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
49 		if (!req)
50 			break;
51 
52 		req->entry = flow_cfg->flow_ent[ent];
53 
54 		/* Send message to AF to free MCAM entries */
55 		err = otx2_sync_mbox_msg(&pfvf->mbox);
56 		if (err)
57 			break;
58 	}
59 	mutex_unlock(&pfvf->mbox.lock);
60 	otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
61 	return 0;
62 }
63 
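/* Allocate 'count' MCAM entries for ntuple filters. Any previously
 * allocated entries are freed first. Returns the number of entries
 * actually obtained from the AF, which may be less than requested.
 */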
64 static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
65 {
66 	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
67 	struct npc_mcam_alloc_entry_req *req;
68 	struct npc_mcam_alloc_entry_rsp *rsp;
69 	int ent, allocated = 0;
70 
71 	/* Free current ones and allocate new ones with requested count */
72 	otx2_free_ntuple_mcam_entries(pfvf);
73 
74 	if (!count)
75 		return 0;
76 
77 	flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
78 						sizeof(u16), GFP_KERNEL);
79 	if (!flow_cfg->flow_ent)
80 		return -ENOMEM;
81 
82 	mutex_lock(&pfvf->mbox.lock);
83 
	/* Only a maximum of NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
	 * allocated in a single request.
	 */
87 	while (allocated < count) {
88 		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
89 		if (!req)
90 			goto exit;
91 
92 		req->contig = false;
93 		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
94 				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
95 		req->priority = NPC_MCAM_HIGHER_PRIO;
96 		req->ref_entry = flow_cfg->def_ent[0];
97 
98 		/* Send message to AF */
99 		if (otx2_sync_mbox_msg(&pfvf->mbox))
100 			goto exit;
101 
		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp))
			goto exit;

105 		for (ent = 0; ent < rsp->count; ent++)
106 			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
107 
108 		allocated += rsp->count;
109 
110 		/* If this request is not fulfilled, no need to send
111 		 * further requests.
112 		 */
113 		if (rsp->count != req->count)
114 			break;
115 	}
116 
117 exit:
118 	mutex_unlock(&pfvf->mbox.lock);
119 
120 	flow_cfg->ntuple_offset = 0;
121 	flow_cfg->ntuple_max_flows = allocated;
122 	flow_cfg->tc_max_flows = allocated;
123 
124 	if (allocated != count)
125 		netdev_info(pfvf->netdev,
126 			    "Unable to allocate %d MCAM entries for ntuple, got %d\n",
127 			    count, allocated);
128 
129 	return allocated;
130 }
131 
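/* Allocate the default MCAM entries: per-VF VLAN entries first, then
 * unicast MAC filter entries, then RX VLAN entries. A separate pool is
 * allocated afterwards for ntuple filters.
 */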
132 int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
133 {
134 	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
135 	struct npc_mcam_alloc_entry_req *req;
136 	struct npc_mcam_alloc_entry_rsp *rsp;
137 	int vf_vlan_max_flows;
138 	int ent, count;
139 
140 	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
141 	count = OTX2_MAX_UNICAST_FLOWS +
142 			OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;
143 
144 	flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
145 					       sizeof(u16), GFP_KERNEL);
146 	if (!flow_cfg->def_ent)
147 		return -ENOMEM;
148 
149 	mutex_lock(&pfvf->mbox.lock);
150 
151 	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
152 	if (!req) {
153 		mutex_unlock(&pfvf->mbox.lock);
154 		return -ENOMEM;
155 	}
156 
157 	req->contig = false;
158 	req->count = count;
159 
160 	/* Send message to AF */
161 	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
162 		mutex_unlock(&pfvf->mbox.lock);
163 		return -EINVAL;
164 	}
165 
	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(rsp);
	}

169 	if (rsp->count != req->count) {
170 		netdev_info(pfvf->netdev,
171 			    "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
172 		mutex_unlock(&pfvf->mbox.lock);
173 		devm_kfree(pfvf->dev, flow_cfg->def_ent);
174 		return 0;
175 	}
176 
177 	for (ent = 0; ent < rsp->count; ent++)
178 		flow_cfg->def_ent[ent] = rsp->entry_list[ent];
179 
180 	flow_cfg->vf_vlan_offset = 0;
181 	flow_cfg->unicast_offset = vf_vlan_max_flows;
182 	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
183 					OTX2_MAX_UNICAST_FLOWS;
184 	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
185 	pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
186 	pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
187 
188 	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
189 	mutex_unlock(&pfvf->mbox.lock);
190 
191 	/* Allocate entries for Ntuple filters */
192 	count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
193 	if (count <= 0) {
194 		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
195 		return 0;
196 	}
197 
198 	pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
199 	pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
200 
201 	return 0;
202 }
203 
204 int otx2_mcam_flow_init(struct otx2_nic *pf)
205 {
206 	int err;
207 
208 	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
209 				    GFP_KERNEL);
210 	if (!pf->flow_cfg)
211 		return -ENOMEM;
212 
213 	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
214 
215 	err = otx2_alloc_mcam_entries(pf);
216 	if (err)
217 		return err;
218 
	/* Check if MCAM entries are allocated or not */
220 	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
221 		return 0;
222 
223 	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
224 					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
225 	if (!pf->mac_table)
226 		return -ENOMEM;
227 
228 	otx2_dmacflt_get_max_cnt(pf);
229 
	/* No DMAC filters are available */
231 	if (!pf->flow_cfg->dmacflt_max_flows)
232 		return 0;
233 
234 	pf->flow_cfg->bmap_to_dmacindex =
235 			devm_kzalloc(pf->dev, sizeof(u8) *
236 				     pf->flow_cfg->dmacflt_max_flows,
237 				     GFP_KERNEL);
238 
239 	if (!pf->flow_cfg->bmap_to_dmacindex)
240 		return -ENOMEM;
241 
242 	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;
243 
244 	return 0;
245 }
246 
247 void otx2_mcam_flow_del(struct otx2_nic *pf)
248 {
249 	otx2_destroy_mcam_flows(pf);
250 }
251 
/* On success adds an MCAM entry
 * On failure promiscuous mode is enabled
 */
255 static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
256 {
257 	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
258 	struct npc_install_flow_req *req;
259 	int err, i;
260 
261 	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
262 		return -ENOMEM;
263 
	/* No free MCAM entries, or the UC list exceeds the allotted count */
265 	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
266 		return -ENOMEM;
267 
268 	mutex_lock(&pf->mbox.lock);
269 	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
270 	if (!req) {
271 		mutex_unlock(&pf->mbox.lock);
272 		return -ENOMEM;
273 	}
274 
	/* Find a free MAC table slot and use its unicast MCAM entry */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
277 		if (pf->mac_table[i].inuse)
278 			continue;
279 		ether_addr_copy(pf->mac_table[i].addr, mac);
280 		pf->mac_table[i].inuse = true;
281 		pf->mac_table[i].mcam_entry =
282 			flow_cfg->def_ent[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
284 		break;
285 	}
286 
287 	ether_addr_copy(req->packet.dmac, mac);
288 	eth_broadcast_addr((u8 *)&req->mask.dmac);
289 	req->features = BIT_ULL(NPC_DMAC);
290 	req->channel = pf->hw.rx_chan_base;
291 	req->intf = NIX_INTF_RX;
292 	req->op = NIX_RX_ACTION_DEFAULT;
293 	req->set_cntr = 1;
294 
295 	err = otx2_sync_mbox_msg(&pf->mbox);
296 	mutex_unlock(&pf->mbox.lock);
297 
298 	return err;
299 }
300 
301 int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
302 {
303 	struct otx2_nic *pf = netdev_priv(netdev);
304 
305 	if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
306 			  pf->flow_cfg->dmacflt_max_flows))
307 		netdev_warn(netdev,
308 			    "Add %pM to CGX/RPM DMAC filters list as well\n",
309 			    mac);
310 
311 	return otx2_do_add_macfilter(pf, mac);
312 }
313 
314 static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
315 				       int *mcam_entry)
316 {
317 	int i;
318 
319 	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
320 		if (!pf->mac_table[i].inuse)
321 			continue;
322 
323 		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
324 			*mcam_entry = pf->mac_table[i].mcam_entry;
325 			pf->mac_table[i].inuse = false;
326 			return true;
327 		}
328 	}
329 	return false;
330 }
331 
332 int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
333 {
334 	struct otx2_nic *pf = netdev_priv(netdev);
335 	struct npc_delete_flow_req *req;
336 	int err, mcam_entry;
337 
	/* Check if an MCAM entry exists for the given MAC */
339 	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
340 		return 0;
341 
342 	mutex_lock(&pf->mbox.lock);
343 	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
344 	if (!req) {
345 		mutex_unlock(&pf->mbox.lock);
346 		return -ENOMEM;
347 	}
348 	req->entry = mcam_entry;
349 	/* Send message to AF */
350 	err = otx2_sync_mbox_msg(&pf->mbox);
351 	mutex_unlock(&pf->mbox.lock);
352 
353 	return err;
354 }
355 
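/* Look up an installed flow by its ethtool rule location */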
356 static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
357 {
358 	struct otx2_flow *iter;
359 
360 	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
361 		if (iter->location == location)
362 			return iter;
363 	}
364 
365 	return NULL;
366 }
367 
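/* Insert a flow into the flow list, keeping the list sorted by location */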
368 static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
369 {
370 	struct list_head *head = &pfvf->flow_cfg->flow_list;
371 	struct otx2_flow *iter;
372 
373 	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
374 		if (iter->location > flow->location)
375 			break;
376 		head = &iter->list;
377 	}
378 
379 	list_add(&flow->list, head);
380 }
381 
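/* Maximum rule locations exposed to ethtool: the DMAC filter region is
 * appended once all ntuple entries are used or any DMAC filter is active.
 */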
382 static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
383 {
384 	if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows ||
385 	    bitmap_weight(&flow_cfg->dmacflt_bmap,
386 			  flow_cfg->dmacflt_max_flows))
387 		return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows;
388 	else
389 		return flow_cfg->ntuple_max_flows;
390 }
391 
392 int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
393 		  u32 location)
394 {
395 	struct otx2_flow *iter;
396 
397 	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
398 		return -EINVAL;
399 
400 	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
401 		if (iter->location == location) {
402 			nfc->fs = iter->flow_spec;
403 			nfc->rss_context = iter->rss_ctx_id;
404 			return 0;
405 		}
406 	}
407 
408 	return -ENOENT;
409 }
410 
411 int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
412 		       u32 *rule_locs)
413 {
414 	u32 rule_cnt = nfc->rule_cnt;
415 	u32 location = 0;
416 	int idx = 0;
417 	int err = 0;
418 
419 	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
420 	while ((!err || err == -ENOENT) && idx < rule_cnt) {
421 		err = otx2_get_flow(pfvf, nfc, location);
422 		if (!err)
423 			rule_locs[idx++] = location;
424 		location++;
425 	}
426 	nfc->rule_cnt = rule_cnt;
427 
428 	return err;
429 }
430 
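/* Translate an ethtool IPv4 flow spec into NPC match fields */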
431 static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
432 				  struct npc_install_flow_req *req,
433 				  u32 flow_type)
434 {
435 	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
436 	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
437 	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
438 	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
439 	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
440 	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
441 	struct flow_msg *pmask = &req->mask;
442 	struct flow_msg *pkt = &req->packet;
443 
444 	switch (flow_type) {
445 	case IP_USER_FLOW:
446 		if (ipv4_usr_mask->ip4src) {
447 			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
448 			       sizeof(pkt->ip4src));
449 			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
450 			       sizeof(pmask->ip4src));
451 			req->features |= BIT_ULL(NPC_SIP_IPV4);
452 		}
453 		if (ipv4_usr_mask->ip4dst) {
454 			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
455 			       sizeof(pkt->ip4dst));
456 			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
457 			       sizeof(pmask->ip4dst));
458 			req->features |= BIT_ULL(NPC_DIP_IPV4);
459 		}
460 		if (ipv4_usr_mask->tos) {
461 			pkt->tos = ipv4_usr_hdr->tos;
462 			pmask->tos = ipv4_usr_mask->tos;
463 			req->features |= BIT_ULL(NPC_TOS);
464 		}
465 		if (ipv4_usr_mask->proto) {
466 			switch (ipv4_usr_hdr->proto) {
467 			case IPPROTO_ICMP:
468 				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
469 				break;
470 			case IPPROTO_TCP:
471 				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
472 				break;
473 			case IPPROTO_UDP:
474 				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
475 				break;
476 			case IPPROTO_SCTP:
477 				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
478 				break;
479 			case IPPROTO_AH:
480 				req->features |= BIT_ULL(NPC_IPPROTO_AH);
481 				break;
482 			case IPPROTO_ESP:
483 				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
484 				break;
485 			default:
486 				return -EOPNOTSUPP;
487 			}
488 		}
489 		pkt->etype = cpu_to_be16(ETH_P_IP);
490 		pmask->etype = cpu_to_be16(0xFFFF);
491 		req->features |= BIT_ULL(NPC_ETYPE);
492 		break;
493 	case TCP_V4_FLOW:
494 	case UDP_V4_FLOW:
495 	case SCTP_V4_FLOW:
496 		pkt->etype = cpu_to_be16(ETH_P_IP);
497 		pmask->etype = cpu_to_be16(0xFFFF);
498 		req->features |= BIT_ULL(NPC_ETYPE);
499 		if (ipv4_l4_mask->ip4src) {
500 			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
501 			       sizeof(pkt->ip4src));
502 			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
503 			       sizeof(pmask->ip4src));
504 			req->features |= BIT_ULL(NPC_SIP_IPV4);
505 		}
506 		if (ipv4_l4_mask->ip4dst) {
507 			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
508 			       sizeof(pkt->ip4dst));
509 			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
510 			       sizeof(pmask->ip4dst));
511 			req->features |= BIT_ULL(NPC_DIP_IPV4);
512 		}
513 		if (ipv4_l4_mask->tos) {
514 			pkt->tos = ipv4_l4_hdr->tos;
515 			pmask->tos = ipv4_l4_mask->tos;
516 			req->features |= BIT_ULL(NPC_TOS);
517 		}
518 		if (ipv4_l4_mask->psrc) {
519 			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
520 			       sizeof(pkt->sport));
521 			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
522 			       sizeof(pmask->sport));
523 			if (flow_type == UDP_V4_FLOW)
524 				req->features |= BIT_ULL(NPC_SPORT_UDP);
525 			else if (flow_type == TCP_V4_FLOW)
526 				req->features |= BIT_ULL(NPC_SPORT_TCP);
527 			else
528 				req->features |= BIT_ULL(NPC_SPORT_SCTP);
529 		}
530 		if (ipv4_l4_mask->pdst) {
531 			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
532 			       sizeof(pkt->dport));
533 			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
534 			       sizeof(pmask->dport));
535 			if (flow_type == UDP_V4_FLOW)
536 				req->features |= BIT_ULL(NPC_DPORT_UDP);
537 			else if (flow_type == TCP_V4_FLOW)
538 				req->features |= BIT_ULL(NPC_DPORT_TCP);
539 			else
540 				req->features |= BIT_ULL(NPC_DPORT_SCTP);
541 		}
542 		if (flow_type == UDP_V4_FLOW)
543 			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
544 		else if (flow_type == TCP_V4_FLOW)
545 			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
546 		else
547 			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
548 		break;
549 	case AH_V4_FLOW:
550 	case ESP_V4_FLOW:
551 		pkt->etype = cpu_to_be16(ETH_P_IP);
552 		pmask->etype = cpu_to_be16(0xFFFF);
553 		req->features |= BIT_ULL(NPC_ETYPE);
554 		if (ah_esp_mask->ip4src) {
555 			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
556 			       sizeof(pkt->ip4src));
557 			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
558 			       sizeof(pmask->ip4src));
559 			req->features |= BIT_ULL(NPC_SIP_IPV4);
560 		}
561 		if (ah_esp_mask->ip4dst) {
562 			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
563 			       sizeof(pkt->ip4dst));
564 			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
565 			       sizeof(pmask->ip4dst));
566 			req->features |= BIT_ULL(NPC_DIP_IPV4);
567 		}
568 		if (ah_esp_mask->tos) {
569 			pkt->tos = ah_esp_hdr->tos;
570 			pmask->tos = ah_esp_mask->tos;
571 			req->features |= BIT_ULL(NPC_TOS);
572 		}
573 
574 		/* NPC profile doesn't extract AH/ESP header fields */
575 		if (ah_esp_mask->spi & ah_esp_hdr->spi)
576 			return -EOPNOTSUPP;
577 
578 		if (flow_type == AH_V4_FLOW)
579 			req->features |= BIT_ULL(NPC_IPPROTO_AH);
580 		else
581 			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
582 		break;
583 	default:
584 		break;
585 	}
586 
587 	return 0;
588 }
589 
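/* Translate an ethtool IPv6 flow spec into NPC match fields */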
590 static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
591 				  struct npc_install_flow_req *req,
592 				  u32 flow_type)
593 {
594 	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
595 	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
596 	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
597 	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
598 	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
599 	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
600 	struct flow_msg *pmask = &req->mask;
601 	struct flow_msg *pkt = &req->packet;
602 
603 	switch (flow_type) {
604 	case IPV6_USER_FLOW:
605 		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
606 			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
607 			       sizeof(pkt->ip6src));
608 			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
609 			       sizeof(pmask->ip6src));
610 			req->features |= BIT_ULL(NPC_SIP_IPV6);
611 		}
612 		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
613 			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
614 			       sizeof(pkt->ip6dst));
615 			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
616 			       sizeof(pmask->ip6dst));
617 			req->features |= BIT_ULL(NPC_DIP_IPV6);
618 		}
619 		pkt->etype = cpu_to_be16(ETH_P_IPV6);
620 		pmask->etype = cpu_to_be16(0xFFFF);
621 		req->features |= BIT_ULL(NPC_ETYPE);
622 		break;
623 	case TCP_V6_FLOW:
624 	case UDP_V6_FLOW:
625 	case SCTP_V6_FLOW:
626 		pkt->etype = cpu_to_be16(ETH_P_IPV6);
627 		pmask->etype = cpu_to_be16(0xFFFF);
628 		req->features |= BIT_ULL(NPC_ETYPE);
629 		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
630 			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
631 			       sizeof(pkt->ip6src));
632 			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
633 			       sizeof(pmask->ip6src));
634 			req->features |= BIT_ULL(NPC_SIP_IPV6);
635 		}
636 		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
637 			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
638 			       sizeof(pkt->ip6dst));
639 			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
640 			       sizeof(pmask->ip6dst));
641 			req->features |= BIT_ULL(NPC_DIP_IPV6);
642 		}
643 		if (ipv6_l4_mask->psrc) {
644 			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
645 			       sizeof(pkt->sport));
646 			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
647 			       sizeof(pmask->sport));
648 			if (flow_type == UDP_V6_FLOW)
649 				req->features |= BIT_ULL(NPC_SPORT_UDP);
650 			else if (flow_type == TCP_V6_FLOW)
651 				req->features |= BIT_ULL(NPC_SPORT_TCP);
652 			else
653 				req->features |= BIT_ULL(NPC_SPORT_SCTP);
654 		}
655 		if (ipv6_l4_mask->pdst) {
656 			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
657 			       sizeof(pkt->dport));
658 			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
659 			       sizeof(pmask->dport));
660 			if (flow_type == UDP_V6_FLOW)
661 				req->features |= BIT_ULL(NPC_DPORT_UDP);
662 			else if (flow_type == TCP_V6_FLOW)
663 				req->features |= BIT_ULL(NPC_DPORT_TCP);
664 			else
665 				req->features |= BIT_ULL(NPC_DPORT_SCTP);
666 		}
667 		if (flow_type == UDP_V6_FLOW)
668 			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
669 		else if (flow_type == TCP_V6_FLOW)
670 			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
671 		else
672 			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
673 		break;
674 	case AH_V6_FLOW:
675 	case ESP_V6_FLOW:
676 		pkt->etype = cpu_to_be16(ETH_P_IPV6);
677 		pmask->etype = cpu_to_be16(0xFFFF);
678 		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_mask->ip6src)) {
680 			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
681 			       sizeof(pkt->ip6src));
682 			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
683 			       sizeof(pmask->ip6src));
684 			req->features |= BIT_ULL(NPC_SIP_IPV6);
685 		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_mask->ip6dst)) {
687 			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
688 			       sizeof(pkt->ip6dst));
689 			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
690 			       sizeof(pmask->ip6dst));
691 			req->features |= BIT_ULL(NPC_DIP_IPV6);
692 		}
693 
694 		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
697 			return -EOPNOTSUPP;
698 
699 		if (flow_type == AH_V6_FLOW)
700 			req->features |= BIT_ULL(NPC_IPPROTO_AH);
701 		else
702 			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
703 		break;
704 	default:
705 		break;
706 	}
707 
708 	return 0;
709 }
710 
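/* Convert an ethtool_rx_flow_spec into an NPC flow install request.
 * Returns -EOPNOTSUPP if the spec contains nothing the hardware can match.
 */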
711 int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
712 			      struct npc_install_flow_req *req)
713 {
714 	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
715 	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
716 	struct flow_msg *pmask = &req->mask;
717 	struct flow_msg *pkt = &req->packet;
718 	u32 flow_type;
719 	int ret;
720 
721 	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
722 	switch (flow_type) {
723 	/* bits not set in mask are don't care */
724 	case ETHER_FLOW:
725 		if (!is_zero_ether_addr(eth_mask->h_source)) {
726 			ether_addr_copy(pkt->smac, eth_hdr->h_source);
727 			ether_addr_copy(pmask->smac, eth_mask->h_source);
728 			req->features |= BIT_ULL(NPC_SMAC);
729 		}
730 		if (!is_zero_ether_addr(eth_mask->h_dest)) {
731 			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
732 			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
733 			req->features |= BIT_ULL(NPC_DMAC);
734 		}
735 		if (eth_mask->h_proto) {
736 			memcpy(&pkt->etype, &eth_hdr->h_proto,
737 			       sizeof(pkt->etype));
738 			memcpy(&pmask->etype, &eth_mask->h_proto,
739 			       sizeof(pmask->etype));
740 			req->features |= BIT_ULL(NPC_ETYPE);
741 		}
742 		break;
743 	case IP_USER_FLOW:
744 	case TCP_V4_FLOW:
745 	case UDP_V4_FLOW:
746 	case SCTP_V4_FLOW:
747 	case AH_V4_FLOW:
748 	case ESP_V4_FLOW:
749 		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
750 		if (ret)
751 			return ret;
752 		break;
753 	case IPV6_USER_FLOW:
754 	case TCP_V6_FLOW:
755 	case UDP_V6_FLOW:
756 	case SCTP_V6_FLOW:
757 	case AH_V6_FLOW:
758 	case ESP_V6_FLOW:
759 		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
760 		if (ret)
761 			return ret;
762 		break;
763 	default:
764 		return -EOPNOTSUPP;
765 	}
766 	if (fsp->flow_type & FLOW_EXT) {
767 		if (fsp->m_ext.vlan_etype)
768 			return -EINVAL;
769 		if (fsp->m_ext.vlan_tci) {
770 			if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
771 				return -EINVAL;
772 			if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
773 				return -EINVAL;
774 
775 			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
776 			       sizeof(pkt->vlan_tci));
777 			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
778 			       sizeof(pmask->vlan_tci));
779 			req->features |= BIT_ULL(NPC_OUTER_VID);
780 		}
781 
		/* Neither drop nor direct-to-queue: use the action from the
		 * default entry
		 */
783 		if (fsp->m_ext.data[1] &&
784 		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
785 			req->op = NIX_RX_ACTION_DEFAULT;
786 	}
787 
788 	if (fsp->flow_type & FLOW_MAC_EXT &&
789 	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
790 		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
791 		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
792 		req->features |= BIT_ULL(NPC_DMAC);
793 	}
794 
795 	if (!req->features)
796 		return -EOPNOTSUPP;
797 
798 	return 0;
799 }
800 
801 static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
802 					struct ethtool_rx_flow_spec *fsp)
803 {
804 	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
805 	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
806 	u64 ring_cookie = fsp->ring_cookie;
807 	u32 flow_type;
808 
809 	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
810 		return false;
811 
812 	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
813 
	/* CGX/RPM block DMAC filtering is configured for whitelisting,
	 * so check for an action other than DROP
	 */
817 	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
818 	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
819 		if (is_zero_ether_addr(eth_mask->h_dest) &&
820 		    is_valid_ether_addr(eth_hdr->h_dest))
821 			return true;
822 	}
823 
824 	return false;
825 }
826 
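/* Build and send an NPC flow install request to the AF for the given flow */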
827 static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
828 {
829 	u64 ring_cookie = flow->flow_spec.ring_cookie;
830 	struct npc_install_flow_req *req;
831 	int err, vf = 0;
832 
833 	mutex_lock(&pfvf->mbox.lock);
834 	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
835 	if (!req) {
836 		mutex_unlock(&pfvf->mbox.lock);
837 		return -ENOMEM;
838 	}
839 
840 	err = otx2_prepare_flow_request(&flow->flow_spec, req);
841 	if (err) {
842 		/* free the allocated msg above */
843 		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
844 		mutex_unlock(&pfvf->mbox.lock);
845 		return err;
846 	}
847 
848 	req->entry = flow->entry;
849 	req->intf = NIX_INTF_RX;
850 	req->set_cntr = 1;
851 	req->channel = pfvf->hw.rx_chan_base;
852 	if (ring_cookie == RX_CLS_FLOW_DISC) {
853 		req->op = NIX_RX_ACTIONOP_DROP;
854 	} else {
855 		/* change to unicast only if action of default entry is not
856 		 * requested by user
857 		 */
858 		if (flow->flow_spec.flow_type & FLOW_RSS) {
859 			req->op = NIX_RX_ACTIONOP_RSS;
860 			req->index = flow->rss_ctx_id;
861 		} else {
862 			req->op = NIX_RX_ACTIONOP_UCAST;
863 			req->index = ethtool_get_flow_spec_ring(ring_cookie);
864 		}
865 		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
866 		if (vf > pci_num_vf(pfvf->pdev)) {
867 			mutex_unlock(&pfvf->mbox.lock);
868 			return -EINVAL;
869 		}
870 	}
871 
872 	/* ethtool ring_cookie has (VF + 1) for VF */
873 	if (vf) {
874 		req->vf = vf;
875 		flow->is_vf = true;
876 		flow->vf = vf;
877 	}
878 
879 	/* Send message to AF */
880 	err = otx2_sync_mbox_msg(&pfvf->mbox);
881 	mutex_unlock(&pfvf->mbox.lock);
882 	return err;
883 }
884 
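/* Before the first user DMAC filter is installed, reserve bitmap bit 0
 * and install a CGX/RPM DMAC filter for the PF's own MAC address so that
 * traffic to the PF MAC continues to be received.
 */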
885 static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
886 				    struct otx2_flow *flow)
887 {
888 	struct otx2_flow *pf_mac;
889 	struct ethhdr *eth_hdr;
890 
891 	pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
892 	if (!pf_mac)
893 		return -ENOMEM;
894 
895 	pf_mac->entry = 0;
896 	pf_mac->dmac_filter = true;
897 	pf_mac->location = pfvf->flow_cfg->ntuple_max_flows;
898 	memcpy(&pf_mac->flow_spec, &flow->flow_spec,
899 	       sizeof(struct ethtool_rx_flow_spec));
900 	pf_mac->flow_spec.location = pf_mac->location;
901 
902 	/* Copy PF mac address */
903 	eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
904 	ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);
905 
906 	/* Install DMAC filter with PF mac address */
907 	otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);
908 
909 	otx2_add_flow_to_list(pfvf, pf_mac);
910 	pfvf->flow_cfg->nr_flows++;
911 	set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
912 
913 	return 0;
914 }
915 
916 int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
917 {
918 	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
919 	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
920 	struct otx2_flow *flow;
921 	struct ethhdr *eth_hdr;
922 	bool new = false;
923 	int err = 0;
924 	u32 ring;
925 
926 	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
927 	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
928 		return -ENOMEM;
929 
930 	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
931 		return -EINVAL;
932 
933 	if (fsp->location >= otx2_get_maxflows(flow_cfg))
934 		return -EINVAL;
935 
936 	flow = otx2_find_flow(pfvf, fsp->location);
937 	if (!flow) {
938 		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
939 		if (!flow)
940 			return -ENOMEM;
941 		flow->location = fsp->location;
942 		new = true;
943 	}
944 	/* struct copy */
945 	flow->flow_spec = *fsp;
946 
947 	if (fsp->flow_type & FLOW_RSS)
948 		flow->rss_ctx_id = nfc->rss_context;
949 
950 	if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
951 		eth_hdr = &flow->flow_spec.h_u.ether_spec;
952 
953 		/* Sync dmac filter table with updated fields */
954 		if (flow->dmac_filter)
955 			return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
956 						   flow->entry);
957 
958 		if (bitmap_full(&flow_cfg->dmacflt_bmap,
959 				flow_cfg->dmacflt_max_flows)) {
960 			netdev_warn(pfvf->netdev,
961 				    "Can't insert the rule %d as max allowed dmac filters are %d\n",
962 				    flow->location +
963 				    flow_cfg->dmacflt_max_flows,
964 				    flow_cfg->dmacflt_max_flows);
965 			err = -EINVAL;
966 			if (new)
967 				kfree(flow);
968 			return err;
969 		}
970 
971 		/* Install PF mac address to DMAC filter list */
972 		if (!test_bit(0, &flow_cfg->dmacflt_bmap))
973 			otx2_add_flow_with_pfmac(pfvf, flow);
974 
975 		flow->dmac_filter = true;
976 		flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
977 						  flow_cfg->dmacflt_max_flows);
978 		fsp->location = flow_cfg->ntuple_max_flows + flow->entry;
979 		flow->flow_spec.location = fsp->location;
980 		flow->location = fsp->location;
981 
982 		set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
983 		otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
984 
985 	} else {
986 		if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) {
987 			netdev_warn(pfvf->netdev,
				    "Can't insert non dmac ntuple rule at %d, allowed range 0-%d\n",
989 				    flow->location,
990 				    flow_cfg->ntuple_max_flows - 1);
991 			err = -EINVAL;
992 		} else {
993 			flow->entry = flow_cfg->flow_ent[flow->location];
994 			err = otx2_add_flow_msg(pfvf, flow);
995 		}
996 	}
997 
998 	if (err) {
999 		if (new)
1000 			kfree(flow);
1001 		return err;
1002 	}
1003 
1004 	/* add the new flow installed to list */
1005 	if (new) {
1006 		otx2_add_flow_to_list(pfvf, flow);
1007 		flow_cfg->nr_flows++;
1008 	}
1009 
1010 	return 0;
1011 }
1012 
1013 static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
1014 {
1015 	struct npc_delete_flow_req *req;
1016 	int err;
1017 
1018 	mutex_lock(&pfvf->mbox.lock);
1019 	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
1020 	if (!req) {
1021 		mutex_unlock(&pfvf->mbox.lock);
1022 		return -ENOMEM;
1023 	}
1024 
1025 	req->entry = entry;
1026 	if (all)
1027 		req->all = 1;
1028 
1029 	/* Send message to AF */
1030 	err = otx2_sync_mbox_msg(&pfvf->mbox);
1031 	mutex_unlock(&pfvf->mbox.lock);
1032 	return err;
1033 }
1034 
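/* Update or remove the DMAC filter (bit 0) that holds the PF's own MAC
 * address, depending on the requested operation.
 */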
1035 static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
1036 {
1037 	struct otx2_flow *iter;
1038 	struct ethhdr *eth_hdr;
1039 	bool found = false;
1040 
1041 	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
1042 		if (iter->dmac_filter && iter->entry == 0) {
1043 			eth_hdr = &iter->flow_spec.h_u.ether_spec;
1044 			if (req == DMAC_ADDR_DEL) {
1045 				otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
1046 						    0);
1047 				clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
1048 				found = true;
1049 			} else {
1050 				ether_addr_copy(eth_hdr->h_dest,
1051 						pfvf->netdev->dev_addr);
1052 				otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
1053 			}
1054 			break;
1055 		}
1056 	}
1057 
1058 	if (found) {
1059 		list_del(&iter->list);
1060 		kfree(iter);
1061 		pfvf->flow_cfg->nr_flows--;
1062 	}
1063 }
1064 
1065 int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
1066 {
1067 	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1068 	struct otx2_flow *flow;
1069 	int err;
1070 
1071 	if (location >= otx2_get_maxflows(flow_cfg))
1072 		return -EINVAL;
1073 
1074 	flow = otx2_find_flow(pfvf, location);
1075 	if (!flow)
1076 		return -ENOENT;
1077 
1078 	if (flow->dmac_filter) {
1079 		struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
1080 
1081 		/* user not allowed to remove dmac filter with interface mac */
1082 		if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
1083 			return -EPERM;
1084 
1085 		err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
1086 					  flow->entry);
1087 		clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
1088 		/* If all dmac filters are removed delete macfilter with
1089 		 * interface mac address and configure CGX/RPM block in
1090 		 * promiscuous mode
1091 		 */
1092 		if (bitmap_weight(&flow_cfg->dmacflt_bmap,
1093 				  flow_cfg->dmacflt_max_flows) == 1)
1094 			otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
1095 	} else {
1096 		err = otx2_remove_flow_msg(pfvf, flow->entry, false);
1097 	}
1098 
1099 	if (err)
1100 		return err;
1101 
1102 	list_del(&flow->list);
1103 	kfree(flow);
1104 	flow_cfg->nr_flows--;
1105 
1106 	return 0;
1107 }
1108 
1109 void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
1110 {
1111 	struct otx2_flow *flow, *tmp;
1112 	int err;
1113 
1114 	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
1115 		if (flow->rss_ctx_id != ctx_id)
1116 			continue;
1117 		err = otx2_remove_flow(pfvf, flow->location);
1118 		if (err)
1119 			netdev_warn(pfvf->netdev,
1120 				    "Can't delete the rule %d associated with this rss group err:%d",
1121 				    flow->location, err);
1122 	}
1123 }
1124 
1125 int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
1126 {
1127 	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1128 	struct npc_delete_flow_req *req;
1129 	struct otx2_flow *iter, *tmp;
1130 	int err;
1131 
1132 	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
1133 		return 0;
1134 
1135 	mutex_lock(&pfvf->mbox.lock);
1136 	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
1137 	if (!req) {
1138 		mutex_unlock(&pfvf->mbox.lock);
1139 		return -ENOMEM;
1140 	}
1141 
1142 	req->start = flow_cfg->flow_ent[0];
1143 	req->end   = flow_cfg->flow_ent[flow_cfg->ntuple_max_flows - 1];
1144 	err = otx2_sync_mbox_msg(&pfvf->mbox);
1145 	mutex_unlock(&pfvf->mbox.lock);
1146 
1147 	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
1148 		list_del(&iter->list);
1149 		kfree(iter);
1150 		flow_cfg->nr_flows--;
1151 	}
1152 	return err;
1153 }
1154 
1155 int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
1156 {
1157 	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1158 	struct npc_mcam_free_entry_req *req;
1159 	struct otx2_flow *iter, *tmp;
1160 	int err;
1161 
1162 	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
1163 		return 0;
1164 
1165 	/* remove all flows */
1166 	err = otx2_remove_flow_msg(pfvf, 0, true);
1167 	if (err)
1168 		return err;
1169 
1170 	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
1171 		list_del(&iter->list);
1172 		kfree(iter);
1173 		flow_cfg->nr_flows--;
1174 	}
1175 
1176 	mutex_lock(&pfvf->mbox.lock);
1177 	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
1178 	if (!req) {
1179 		mutex_unlock(&pfvf->mbox.lock);
1180 		return -ENOMEM;
1181 	}
1182 
1183 	req->all = 1;
1184 	/* Send message to AF to free MCAM entries */
1185 	err = otx2_sync_mbox_msg(&pfvf->mbox);
1186 	if (err) {
1187 		mutex_unlock(&pfvf->mbox.lock);
1188 		return err;
1189 	}
1190 
1191 	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
1192 	mutex_unlock(&pfvf->mbox.lock);
1193 
1194 	return 0;
1195 }
1196 
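/* Install an MCAM rule matching the PF MAC with an outer VLAN so that
 * VTAG0 is captured for RX VLAN offload.
 */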
1197 int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
1198 {
1199 	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1200 	struct npc_install_flow_req *req;
1201 	int err;
1202 
1203 	mutex_lock(&pfvf->mbox.lock);
1204 	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
1205 	if (!req) {
1206 		mutex_unlock(&pfvf->mbox.lock);
1207 		return -ENOMEM;
1208 	}
1209 
1210 	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
1211 	req->intf = NIX_INTF_RX;
1212 	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
1213 	eth_broadcast_addr((u8 *)&req->mask.dmac);
1214 	req->channel = pfvf->hw.rx_chan_base;
1215 	req->op = NIX_RX_ACTION_DEFAULT;
1216 	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
1217 	req->vtag0_valid = true;
1218 	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
1219 
1220 	/* Send message to AF */
1221 	err = otx2_sync_mbox_msg(&pfvf->mbox);
1222 	mutex_unlock(&pfvf->mbox.lock);
1223 	return err;
1224 }
1225 
1226 static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
1227 {
1228 	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1229 	struct npc_delete_flow_req *req;
1230 	int err;
1231 
1232 	mutex_lock(&pfvf->mbox.lock);
1233 	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
1234 	if (!req) {
1235 		mutex_unlock(&pfvf->mbox.lock);
1236 		return -ENOMEM;
1237 	}
1238 
1239 	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
1240 	/* Send message to AF */
1241 	err = otx2_sync_mbox_msg(&pfvf->mbox);
1242 	mutex_unlock(&pfvf->mbox.lock);
1243 	return err;
1244 }
1245 
1246 int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
1247 {
1248 	struct nix_vtag_config *req;
1249 	struct mbox_msghdr *rsp_hdr;
1250 	int err;
1251 
	/* Don't have enough MCAM entries */
1253 	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
1254 		return -ENOMEM;
1255 
1256 	if (enable) {
1257 		err = otx2_install_rxvlan_offload_flow(pf);
1258 		if (err)
1259 			return err;
1260 	} else {
1261 		err = otx2_delete_rxvlan_offload_flow(pf);
1262 		if (err)
1263 			return err;
1264 	}
1265 
1266 	mutex_lock(&pf->mbox.lock);
1267 	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
1268 	if (!req) {
1269 		mutex_unlock(&pf->mbox.lock);
1270 		return -ENOMEM;
1271 	}
1272 
1273 	/* config strip, capture and size */
1274 	req->vtag_size = VTAGSIZE_T4;
1275 	req->cfg_type = 1; /* rx vlan cfg */
1276 	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
1277 	req->rx.strip_vtag = enable;
1278 	req->rx.capture_vtag = enable;
1279 
1280 	err = otx2_sync_mbox_msg(&pf->mbox);
1281 	if (err) {
1282 		mutex_unlock(&pf->mbox.lock);
1283 		return err;
1284 	}
1285 
1286 	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
1287 	if (IS_ERR(rsp_hdr)) {
1288 		mutex_unlock(&pf->mbox.lock);
1289 		return PTR_ERR(rsp_hdr);
1290 	}
1291 
1292 	mutex_unlock(&pf->mbox.lock);
1293 	return rsp_hdr->rc;
1294 }
1295 
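/* Re-install every software-tracked DMAC filter into the CGX/RPM block */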
1296 void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
1297 {
1298 	struct otx2_flow *iter;
1299 	struct ethhdr *eth_hdr;
1300 
1301 	list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
1302 		if (iter->dmac_filter) {
1303 			eth_hdr = &iter->flow_spec.h_u.ether_spec;
1304 			otx2_dmacflt_add(pf, eth_hdr->h_dest,
1305 					 iter->entry);
1306 		}
1307 	}
1308 }
1309 
1310 void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
1311 {
1312 	otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
1313 }
1314