// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <net/ipv6.h>
#include <linux/sort.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

static int otx2_mcam_entry_init(struct otx2_nic *pfvf);

struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u16 entry;
	bool is_vf;
	u8 rss_ctx_id;
	int vf;
	bool dmac_filter;
};

enum dmac_req {
	DMAC_ADDR_UPDATE,
	DMAC_ADDR_DEL
};

static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
{
	devm_kfree(pfvf->dev, flow_cfg->flow_ent);
	flow_cfg->flow_ent = NULL;
	flow_cfg->max_flows = 0;
}

static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	int ent, err;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	for (ent = 0; ent < flow_cfg->max_flows; ent++) {
		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
		if (!req)
			break;

		req->entry = flow_cfg->flow_ent[ent];

		/* Send message to AF to free MCAM entries */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			break;
	}
	mutex_unlock(&pfvf->mbox.lock);
	otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
	return 0;
}

static int mcam_entry_cmp(const void *a, const void *b)
{
	return *(u16 *)a - *(u16 *)b;
}

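/* Free any previously allocated ntuple MCAM entries and allocate
 * 'count' new ones from the AF, in batches of at most
 * NPC_MAX_NONCONTIG_ENTRIES. The resulting entry list is sorted so
 * that ntuple filter locations map to MCAM entries in ascending
 * order. Returns the number of entries actually allocated.
 */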
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int ent, allocated = 0;

	/* Free current ones and allocate new ones with requested count */
	otx2_free_ntuple_mcam_entries(pfvf);

	if (!count)
		return 0;

	flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
						sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->flow_ent) {
		netdev_err(pfvf->netdev,
			   "%s: Unable to allocate memory for flow entries\n",
			    __func__);
		return -ENOMEM;
	}

	mutex_lock(&pfvf->mbox.lock);

	/* At most NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
	 * allocated in a single request.
	 */
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
		if (!req)
			goto exit;

		req->contig = false;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		/* Allocate higher priority entries for PFs, so that VFs'
		 * entries will be on top of the PF's entries.
		 */
		if (!is_otx2_vf(pfvf->pcifunc)) {
			req->priority = NPC_MCAM_HIGHER_PRIO;
			req->ref_entry = flow_cfg->def_ent[0];
		}

		/* Send message to AF */
		if (otx2_sync_mbox_msg(&pfvf->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&pfvf->mbox.mbox, 0, &req->hdr);

		for (ent = 0; ent < rsp->count; ent++)
			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		/* If this request is not fulfilled, no need to send
		 * further requests.
		 */
		if (rsp->count != req->count)
			break;
	}

	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in ascending order,
	 * otherwise the user-installed ntuple filter index and the MCAM entry
	 * index will not be in sync.
	 */
	if (allocated)
		sort(&flow_cfg->flow_ent[0], allocated,
		     sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

exit:
	mutex_unlock(&pfvf->mbox.lock);

	flow_cfg->max_flows = allocated;

	if (allocated) {
		pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
	}

	if (allocated != count)
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got only %d\n",
			    count, allocated);
	return allocated;
}
EXPORT_SYMBOL(otx2_alloc_mcam_entries);

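/* Allocate the default MCAM entries (per-VF VLAN, unicast and Rx VLAN
 * filters) in a single AF request, record each group's offset within
 * def_ent[], then allocate a separate pool of entries for ntuple
 * filters.
 */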
static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int ent, count;

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	count = OTX2_MAX_UNICAST_FLOWS +
			OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;

	flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
					       sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->def_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->contig = false;
	req->count = count;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
		mutex_unlock(&pfvf->mbox.lock);
		devm_kfree(pfvf->dev, flow_cfg->def_ent);
		return 0;
	}

	for (ent = 0; ent < rsp->count; ent++)
		flow_cfg->def_ent[ent] = rsp->entry_list[ent];

	flow_cfg->vf_vlan_offset = 0;
	flow_cfg->unicast_offset = vf_vlan_max_flows;
	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
					OTX2_MAX_UNICAST_FLOWS;
	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
	pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
	pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	/* Allocate entries for Ntuple filters */
	count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0) {
		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
		return 0;
	}

	pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;

	return 0;
}

int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg;

	pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
				      sizeof(struct otx2_flow_config),
				      GFP_KERNEL);
	if (!pfvf->flow_cfg)
		return -ENOMEM;

	flow_cfg = pfvf->flow_cfg;
	INIT_LIST_HEAD(&flow_cfg->flow_list);
	flow_cfg->max_flows = 0;

	return 0;
}
EXPORT_SYMBOL(otx2vf_mcam_flow_init);

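/* PF counterpart of otx2vf_mcam_flow_init(): also allocates the
 * default and ntuple MCAM entries, the unicast MAC filter table and,
 * if the CGX/RPM block provides DMAC filters, the bitmap-to-DMAC-index
 * map used to track them.
 */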
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

	/* Allocate bare minimum number of MCAM entries needed for
	 * unicast and ntuple filters.
	 */
	err = otx2_mcam_entry_init(pf);
	if (err)
		return err;

	/* Check whether MCAM entries were allocated */
	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return 0;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	otx2_dmacflt_get_max_cnt(pf);

	/* DMAC filters are not allocated */
	if (!pf->flow_cfg->dmacflt_max_flows)
		return 0;

	pf->flow_cfg->bmap_to_dmacindex =
			devm_kzalloc(pf->dev, sizeof(u8) *
				     pf->flow_cfg->dmacflt_max_flows,
				     GFP_KERNEL);

	if (!pf->flow_cfg->bmap_to_dmacindex)
		return -ENOMEM;

	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}
EXPORT_SYMBOL(otx2_mcam_flow_del);

/*  On success adds mcam entry
 *  On failure enable promiscuous mode
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* don't have free mcam entries or uc list is greater than allotted */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* unicast offset starts at 32; entries 0..31 are for ntuple filters */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->def_ent[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
			  pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(netdev,
			    "Add %pM to CGX/RPM DMAC filters list as well\n",
			    mac);

	return otx2_do_add_macfilter(pf, mac);
}

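/* Find the MCAM entry installed for 'mac' and release its MAC table
 * slot. Returns false if no filter exists for this address.
 */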
static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* check whether an mcam entry exists for the given mac */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

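/* Insert 'flow' into flow_list, which is kept sorted by ascending
 * location.
 */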
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

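/* Number of flow locations exposed via ethtool: the ntuple MCAM
 * entries plus, once those are exhausted or any DMAC filter is in
 * use, the CGX/RPM DMAC filter slots.
 */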
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
	if (!flow_cfg)
		return 0;

	if (flow_cfg->nr_flows == flow_cfg->max_flows ||
	    bitmap_weight(&flow_cfg->dmacflt_bmap,
			  flow_cfg->dmacflt_max_flows))
		return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
	else
		return flow_cfg->max_flows;
}
EXPORT_SYMBOL(otx2_get_maxflows);

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}

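/* Translate the IPv4 specific fields of an ethtool flow spec into an
 * NPC install request. Fields whose mask is zero are left as
 * don't-care.
 */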
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

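/* IPv6 counterpart of otx2_prepare_ipv4_flow() */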
static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

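/* Convert an ethtool_rx_flow_spec into an npc_install_flow_req.
 * Returns -EOPNOTSUPP if none of the requested match fields is
 * supported by the NPC profile.
 */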
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_hdr->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		if (fsp->m_ext.vlan_etype)
			return -EINVAL;
		if (fsp->m_ext.vlan_tci) {
			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		/* Not Drop/Direct to queue but use action in default entry */
		if (fsp->m_ext.data[1] &&
		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
			req->op = NIX_RX_ACTION_DEFAULT;
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
					struct ethtool_rx_flow_spec *fsp)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	u64 ring_cookie = fsp->ring_cookie;
	u32 flow_type;

	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
		return false;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);

	/* CGX/RPM block dmac filtering is configured for allowlisting,
	 * so check for any action other than DROP
	 */
	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
		if (is_zero_ether_addr(eth_mask->h_dest) &&
		    is_valid_ether_addr(eth_hdr->h_dest))
			return true;
	}

	return false;
}

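/* Build and send the NPC install request for 'flow'. The ring_cookie
 * selects the action: drop, RSS group or unicast queue, with an
 * optional VF destination encoded as (VF + 1).
 */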
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if action of default entry is not
		 * requested by user
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
			req->flow_key_alg = pfvf->hw.flowkey_alg_idx;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

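/* DMAC filtering allowlists the configured addresses only, so when
 * the first user DMAC filter rule is installed also install a filter
 * for the PF's own MAC address at index 0.
 */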
static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
				    struct otx2_flow *flow)
{
	struct otx2_flow *pf_mac;
	struct ethhdr *eth_hdr;

	pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
	if (!pf_mac)
		return -ENOMEM;

	pf_mac->entry = 0;
	pf_mac->dmac_filter = true;
	pf_mac->location = pfvf->flow_cfg->max_flows;
	memcpy(&pf_mac->flow_spec, &flow->flow_spec,
	       sizeof(struct ethtool_rx_flow_spec));
	pf_mac->flow_spec.location = pf_mac->location;

	/* Copy PF mac address */
	eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
	ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);

	/* Install DMAC filter with PF mac address */
	otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);

	otx2_add_flow_to_list(pfvf, pf_mac);
	pfvf->flow_cfg->nr_flows++;
	set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);

	return 0;
}

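/* Add or update an ethtool ntuple rule. ETHER_FLOW rules matching
 * only on a valid destination MAC are installed as CGX/RPM DMAC
 * filters; all other rules consume one of the preallocated ntuple
 * MCAM entries.
 */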
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	struct ethhdr *eth_hdr;
	bool new = false;
	int err = 0;
	u32 ring;

	if (!flow_cfg->max_flows) {
		netdev_err(pfvf->netdev,
			   "Ntuple rule count is 0, allocate and retry\n");
		return -EINVAL;
	}

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
		eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* Sync dmac filter table with updated fields */
		if (flow->dmac_filter)
			return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
						   flow->entry);

		if (bitmap_full(&flow_cfg->dmacflt_bmap,
				flow_cfg->dmacflt_max_flows)) {
			netdev_warn(pfvf->netdev,
				    "Can't insert the rule %d as max allowed dmac filters are %d\n",
				    flow->location +
				    flow_cfg->dmacflt_max_flows,
				    flow_cfg->dmacflt_max_flows);
			err = -EINVAL;
			if (new)
				kfree(flow);
			return err;
		}

		/* Install PF mac address to DMAC filter list */
		if (!test_bit(0, &flow_cfg->dmacflt_bmap))
			otx2_add_flow_with_pfmac(pfvf, flow);

		flow->dmac_filter = true;
		flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
						  flow_cfg->dmacflt_max_flows);
		fsp->location = flow_cfg->max_flows + flow->entry;
		flow->flow_spec.location = fsp->location;
		flow->location = fsp->location;

		set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);

	} else {
		if (flow->location >= pfvf->flow_cfg->max_flows) {
			netdev_warn(pfvf->netdev,
				    "Can't insert non dmac ntuple rule at %d, allowed range 0-%d\n",
				    flow->location,
				    flow_cfg->max_flows - 1);
			err = -EINVAL;
		} else {
			flow->entry = flow_cfg->flow_ent[flow->location];
			err = otx2_add_flow_msg(pfvf, flow);
		}
	}

	if (err) {
		if (err == MBOX_MSG_INVALID)
			err = -EINVAL;
		if (new)
			kfree(flow);
		return err;
	}

	/* add the new flow installed to list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

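/* Update or remove the PF MAC DMAC filter installed by
 * otx2_add_flow_with_pfmac(), which always occupies index 0.
 */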
static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;
	bool found = false;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter && iter->entry == 0) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			if (req == DMAC_ADDR_DEL) {
				otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
						    0);
				clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
				found = true;
			} else {
				ether_addr_copy(eth_hdr->h_dest,
						pfvf->netdev->dev_addr);
				otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
			}
			break;
		}
	}

	if (found) {
		list_del(&iter->list);
		kfree(iter);
		pfvf->flow_cfg->nr_flows--;
	}
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	if (flow->dmac_filter) {
		struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* user not allowed to remove dmac filter with interface mac */
		if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
			return -EPERM;

		err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
					  flow->entry);
		clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		/* If all dmac filters are removed delete macfilter with
		 * interface mac address and configure CGX/RPM block in
		 * promiscuous mode
		 */
		if (bitmap_weight(&flow_cfg->dmacflt_bmap,
				  flow_cfg->dmacflt_max_flows) == 1)
			otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
	} else {
		err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	}

	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d",
				    flow->location, err);
	}
}

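/* Delete all installed ntuple flows with a single ranged AF request
 * and free the software rule list.
 */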
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->flow_ent[0];
	req->end   = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

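/* Install the MCAM rule backing Rx VLAN offload: match VLAN tagged
 * traffic destined to the PF's MAC and apply the default action with
 * VTAG0 capture enabled.
 */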
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

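/* Enable or disable Rx VLAN strip/capture offload: install or delete
 * the Rx VLAN MCAM rule, then configure VTAG0 handling via a
 * NIX_VTAG_CFG request.
 */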
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough mcam entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}

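/* Reprogram every software tracked DMAC filter rule into the CGX/RPM
 * filter table.
 */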
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;

	list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			otx2_dmacflt_add(pf, eth_hdr->h_dest,
					 iter->entry);
		}
	}
}

void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
{
	otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
}