// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <net/ipv6.h>
#include <linux/sort.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

static int otx2_mcam_entry_init(struct otx2_nic *pfvf);

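/* Software state for one installed ethtool flow rule.
 * @flow_spec:   ethtool rule as passed in via the rxnfc interface
 * @location:    user-visible rule index (fsp->location)
 * @entry:       NPC MCAM entry backing the rule, or the CGX/RPM DMAC
 *               filter slot when @dmac_filter is set
 * @is_vf, @vf:  set when the rule redirects traffic to a VF
 * @rss_ctx_id:  RSS context used when the rule's action is RSS
 * @dmac_filter: rule lives in the CGX/RPM DMAC filter list, not the MCAM
 */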
struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u16 entry;
	bool is_vf;
	u8 rss_ctx_id;
	int vf;
	bool dmac_filter;
};

enum dmac_req {
	DMAC_ADDR_UPDATE,
	DMAC_ADDR_DEL
};

static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
{
	devm_kfree(pfvf->dev, flow_cfg->flow_ent);
	flow_cfg->flow_ent = NULL;
	flow_cfg->max_flows = 0;
}

static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	int ent, err;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	for (ent = 0; ent < flow_cfg->max_flows; ent++) {
		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
		if (!req)
			break;

		req->entry = flow_cfg->flow_ent[ent];

		/* Send message to AF to free MCAM entries */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			break;
	}
	mutex_unlock(&pfvf->mbox.lock);
	otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
	return 0;
}

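/* sort() comparator: orders MCAM entry indices ascending. Entries are
 * u16, so the int subtraction below cannot overflow.
 */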
static int mcam_entry_cmp(const void *a, const void *b)
{
	return *(u16 *)a - *(u16 *)b;
}

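/* (Re)allocate @count non-contiguous MCAM entries for ntuple filters.
 * Previously allocated ntuple entries are freed first. Besides the init
 * path, this can also be reached at runtime (e.g. via the devlink
 * "mcam_count" parameter in this driver family), so a partial allocation
 * is reported to the user but not treated as fatal.
 */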
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int ent, allocated = 0;

	/* Free current ones and allocate new ones with requested count */
	otx2_free_ntuple_mcam_entries(pfvf);

	if (!count)
		return 0;

	flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
						sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->flow_ent) {
		netdev_err(pfvf->netdev,
			   "%s: Unable to allocate memory for flow entries\n",
			   __func__);
		return -ENOMEM;
	}

	mutex_lock(&pfvf->mbox.lock);

	/* At most NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
	 * allocated in a single request.
	 */
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
		if (!req)
			goto exit;

		req->contig = false;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		/* Allocate higher priority entries for PFs, so that the
		 * VFs' entries will be on top of the PF's.
		 */
		if (!is_otx2_vf(pfvf->pcifunc)) {
			req->priority = NPC_MCAM_HIGHER_PRIO;
			req->ref_entry = flow_cfg->def_ent[0];
		}

		/* Send message to AF */
		if (otx2_sync_mbox_msg(&pfvf->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp))
			goto exit;

		for (ent = 0; ent < rsp->count; ent++)
			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		/* If this request is not fulfilled, no need to send
		 * further requests.
		 */
		if (rsp->count != req->count)
			break;
	}

	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in ascending order;
	 * otherwise the user-installed ntuple filter indices and the MCAM
	 * entry indices will fall out of sync.
	 */
	if (allocated)
		sort(&flow_cfg->flow_ent[0], allocated,
		     sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

exit:
	mutex_unlock(&pfvf->mbox.lock);

	flow_cfg->max_flows = allocated;

	if (allocated) {
		pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
	}

	if (allocated != count)
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got only %d\n",
			    count, allocated);
	return allocated;
}
EXPORT_SYMBOL(otx2_alloc_mcam_entries);

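/* Allocate the "default" MCAM entries and record their layout in def_ent[]:
 *   def_ent[0 .. vf_vlan_max_flows - 1]   VF VLAN entries
 *   def_ent[unicast_offset ..]            OTX2_MAX_UNICAST_FLOWS unicast filters
 *   def_ent[rx_vlan_offset]               RX VLAN strip entry
 */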
static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int ent, count;

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	count = OTX2_MAX_UNICAST_FLOWS +
			OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;

	flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
					       sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->def_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->contig = false;
	req->count = count;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(rsp);
	}

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
		mutex_unlock(&pfvf->mbox.lock);
		devm_kfree(pfvf->dev, flow_cfg->def_ent);
		return 0;
	}

	for (ent = 0; ent < rsp->count; ent++)
		flow_cfg->def_ent[ent] = rsp->entry_list[ent];

	flow_cfg->vf_vlan_offset = 0;
	flow_cfg->unicast_offset = vf_vlan_max_flows;
	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
					OTX2_MAX_UNICAST_FLOWS;
	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
	pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
	pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	/* Allocate entries for Ntuple filters */
	count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0) {
		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
		return 0;
	}

	pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;

	return 0;
}

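/* VF variant of MCAM flow init: no default (unicast/VLAN) entries are
 * reserved here. max_flows starts at 0; ntuple entries are expected to
 * be allocated on demand via otx2_alloc_mcam_entries() once filtering
 * is enabled.
 */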
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg;

	pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
				      sizeof(struct otx2_flow_config),
				      GFP_KERNEL);
	if (!pfvf->flow_cfg)
		return -ENOMEM;

	flow_cfg = pfvf->flow_cfg;
	INIT_LIST_HEAD(&flow_cfg->flow_list);
	flow_cfg->max_flows = 0;

	return 0;
}
EXPORT_SYMBOL(otx2vf_mcam_flow_init);

int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

	/* Allocate bare minimum number of MCAM entries needed for
	 * unicast and ntuple filters.
	 */
	err = otx2_mcam_entry_init(pf);
	if (err)
		return err;

	/* Check if MCAM entries are allocated or not */
	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return 0;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	otx2_dmacflt_get_max_cnt(pf);

	/* DMAC filters are not allocated */
	if (!pf->flow_cfg->dmacflt_max_flows)
		return 0;

	pf->flow_cfg->bmap_to_dmacindex =
			devm_kzalloc(pf->dev, sizeof(u8) *
				     pf->flow_cfg->dmacflt_max_flows,
				     GFP_KERNEL);

	if (!pf->flow_cfg->bmap_to_dmacindex)
		return -ENOMEM;

	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}
EXPORT_SYMBOL(otx2_mcam_flow_del);

/* On success adds an MCAM entry.
 * On failure promiscuous mode is enabled instead.
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* No free MCAM entries, or the UC list exceeds the allotted count */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* Unicast offset starts at 32; entries 0..31 are for ntuple */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->def_ent[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
			  pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(netdev,
			    "Add %pM to CGX/RPM DMAC filters list as well\n",
			    mac);

	return otx2_do_add_macfilter(pf, mac);
}

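/* Look up the MCAM entry backing @mac in the unicast MAC table. Note the
 * side effect: on a hit the table slot is also marked unused, so this is
 * effectively a lookup-and-release used on the delete path.
 */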
static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* Check whether an MCAM entry exists for the given mac */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

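/* Insert @flow into flow_list keeping the list sorted by ascending
 * location, so rule dumps walk the rules in index order.
 */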
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
	if (!flow_cfg)
		return 0;

	if (flow_cfg->nr_flows == flow_cfg->max_flows ||
	    bitmap_weight(&flow_cfg->dmacflt_bmap,
			  flow_cfg->dmacflt_max_flows))
		return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
	else
		return flow_cfg->max_flows;
}
EXPORT_SYMBOL(otx2_get_maxflows);

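/* ETHTOOL_GRXCLSRULE backend: copy the rule installed at @location (and
 * its RSS context, if any) into @nfc.
 */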
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}

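/* Translate an ethtool IPv4 flow spec into NPC match fields. Only fields
 * with a non-zero mask are copied into the request; each copied field is
 * advertised to the AF via an NPC_* bit in req->features.
 */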
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

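/* Top-level ethtool -> NPC translation. ETHER_FLOW is handled inline;
 * IPv4/IPv6 flow types are dispatched to the helpers above. FLOW_EXT
 * carries the optional outer VLAN ID match, and user-defined data[1] set
 * to OTX2_DEFAULT_ACTION requests the default-entry RX action instead of
 * a drop/queue action.
 */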
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_hdr->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		if (fsp->m_ext.vlan_etype)
			return -EINVAL;
		if (fsp->m_ext.vlan_tci) {
			if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
				return -EINVAL;
			if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;

			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		/* Not Drop/Direct to queue but use action in default entry */
		if (fsp->m_ext.data[1] &&
		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
			req->op = NIX_RX_ACTION_DEFAULT;
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
					struct ethtool_rx_flow_spec *fsp)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	u64 ring_cookie = fsp->ring_cookie;
	u32 flow_type;

	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
		return false;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);

	/* CGX/RPM block DMAC filtering is configured for white listing;
	 * check for an action other than DROP.
	 */
	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
		if (is_zero_ether_addr(eth_mask->h_dest) &&
		    is_valid_ether_addr(eth_hdr->h_dest))
			return true;
	}

	return false;
}

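/* Build and send the NPC flow-install mailbox request for @flow. The
 * ethtool ring_cookie encodes the action: RX_CLS_FLOW_DISC means drop,
 * otherwise the low bits select the RX queue and the cookie's VF field
 * (VF index + 1) redirects the flow to that VF.
 */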
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if action of default entry is not
		 * requested by user
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

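/* The first user-requested DMAC filter switches the CGX/RPM block from
 * accepting all unicast to filtering, which would otherwise drop the
 * PF's own traffic. Reserve DMAC slot 0 (bit 0 of dmacflt_bmap) for the
 * PF MAC and track it as a hidden rule at location max_flows.
 */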
static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
				    struct otx2_flow *flow)
{
	struct otx2_flow *pf_mac;
	struct ethhdr *eth_hdr;

	pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
	if (!pf_mac)
		return -ENOMEM;

	pf_mac->entry = 0;
	pf_mac->dmac_filter = true;
	pf_mac->location = pfvf->flow_cfg->max_flows;
	memcpy(&pf_mac->flow_spec, &flow->flow_spec,
	       sizeof(struct ethtool_rx_flow_spec));
	pf_mac->flow_spec.location = pf_mac->location;

	/* Copy PF mac address */
	eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
	ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);

	/* Install DMAC filter with PF mac address */
	otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);

	otx2_add_flow_to_list(pfvf, pf_mac);
	pfvf->flow_cfg->nr_flows++;
	set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);

	return 0;
}

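/* ETHTOOL_SRXCLSRLINS backend. Illustrative userspace trigger (names and
 * values are examples only):
 *   ethtool -N eth0 flow-type tcp4 src-ip 10.0.0.1 dst-port 80 \
 *           action 2 loc 5
 * arrives here with fsp->location == 5 and ring_cookie == 2.
 */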
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	struct ethhdr *eth_hdr;
	bool new = false;
	int err = 0;
	u32 ring;

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
		eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* Sync dmac filter table with updated fields */
		if (flow->dmac_filter)
			return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
						   flow->entry);

		if (bitmap_full(&flow_cfg->dmacflt_bmap,
				flow_cfg->dmacflt_max_flows)) {
			netdev_warn(pfvf->netdev,
				    "Can't insert the rule %d as max allowed dmac filters are %d\n",
				    flow->location +
				    flow_cfg->dmacflt_max_flows,
				    flow_cfg->dmacflt_max_flows);
			err = -EINVAL;
			if (new)
				kfree(flow);
			return err;
		}

		/* Install PF mac address to DMAC filter list */
		if (!test_bit(0, &flow_cfg->dmacflt_bmap))
			otx2_add_flow_with_pfmac(pfvf, flow);

		flow->dmac_filter = true;
		flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
						  flow_cfg->dmacflt_max_flows);
		fsp->location = flow_cfg->max_flows + flow->entry;
		flow->flow_spec.location = fsp->location;
		flow->location = fsp->location;

		set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);

	} else {
		if (flow->location >= pfvf->flow_cfg->max_flows) {
			netdev_warn(pfvf->netdev,
				    "Can't insert non dmac ntuple rule at %d, allowed range 0-%d\n",
				    flow->location,
				    flow_cfg->max_flows - 1);
			err = -EINVAL;
		} else {
			flow->entry = flow_cfg->flow_ent[flow->location];
			err = otx2_add_flow_msg(pfvf, flow);
		}
	}

	if (err) {
		if (err == MBOX_MSG_INVALID)
			err = -EINVAL;
		if (new)
			kfree(flow);
		return err;
	}

	/* add the new flow installed to list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

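/* Handle the PF-MAC DMAC rule reserved at entry 0: on DMAC_ADDR_DEL drop
 * it from hardware and the flow list; on DMAC_ADDR_UPDATE re-sync it
 * with the netdev's current MAC address.
 */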
static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;
	bool found = false;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter && iter->entry == 0) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			if (req == DMAC_ADDR_DEL) {
				otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
						    0);
				clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
				found = true;
			} else {
				ether_addr_copy(eth_hdr->h_dest,
						pfvf->netdev->dev_addr);
				otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
			}
			break;
		}
	}

	if (found) {
		list_del(&iter->list);
		kfree(iter);
		pfvf->flow_cfg->nr_flows--;
	}
}

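/* ETHTOOL_SRXCLSRLDEL backend. When the last user DMAC filter goes away
 * (bitmap weight 1 means only the PF's own entry at bit 0 remains), the
 * reserved PF-MAC entry is removed too and CGX/RPM filtering is turned
 * back off.
 */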
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	if (flow->dmac_filter) {
		struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* user not allowed to remove dmac filter with interface mac */
		if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
			return -EPERM;

		err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
					  flow->entry);
		clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		/* If all dmac filters are removed delete macfilter with
		 * interface mac address and configure CGX/RPM block in
		 * promiscuous mode
		 */
		if (bitmap_weight(&flow_cfg->dmacflt_bmap,
				  flow_cfg->dmacflt_max_flows) == 1)
			otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
	} else {
		err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	}

	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d",
				    flow->location, err);
	}
}

int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->flow_ent[0];
	req->end   = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

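/* Toggle hardware RX VLAN stripping. Typically reached when userspace
 * flips the NETIF_F_HW_VLAN_CTAG_RX feature (e.g. "ethtool -K eth0
 * rxvlan on|off"): install/remove the RX VLAN MCAM flow, then configure
 * VTAG strip/capture through the NIX VTAG config mailbox message.
 */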
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough MCAM entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}

void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;

	list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			otx2_dmacflt_add(pf, eth_hdr->h_dest,
					 iter->entry);
		}
	}
}

void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
{
	otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
}