// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <net/ipv6.h>
#include <linux/sort.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

static int otx2_mcam_entry_init(struct otx2_nic *pfvf);

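/* Software state for a single ethtool flow rule: the user-visible
 * flow spec and location, the backing MCAM entry (or DMAC filter
 * index), and the VF/RSS context the rule is steered to.
 */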
struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u16 entry;
	bool is_vf;
	u8 rss_ctx_id;
	int vf;
	bool dmac_filter;
};

enum dmac_req {
	DMAC_ADDR_UPDATE,
	DMAC_ADDR_DEL
};

static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
{
	devm_kfree(pfvf->dev, flow_cfg->flow_ent);
	flow_cfg->flow_ent = NULL;
	flow_cfg->max_flows = 0;
}

static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	int ent, err;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	for (ent = 0; ent < flow_cfg->max_flows; ent++) {
		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
		if (!req)
			break;

		req->entry = flow_cfg->flow_ent[ent];

		/* Send message to AF to free MCAM entries */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			break;
	}
	mutex_unlock(&pfvf->mbox.lock);
	otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
	return 0;
}

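/* sort() comparator: orders MCAM entry indices in ascending order so
 * that ntuple filter location N always maps to the Nth lowest entry.
 */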
static int mcam_entry_cmp(const void *a, const void *b)
{
	return *(u16 *)a - *(u16 *)b;
}

int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int ent, allocated = 0;

	/* Free current ones and allocate new ones with requested count */
	otx2_free_ntuple_mcam_entries(pfvf);

	if (!count)
		return 0;

	flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
						sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->flow_ent) {
		netdev_err(pfvf->netdev,
			   "%s: Unable to allocate memory for flow entries\n",
			    __func__);
		return -ENOMEM;
	}

	mutex_lock(&pfvf->mbox.lock);

	/* At most NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
	 * allocated in a single request.
	 */
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
		if (!req)
			goto exit;

		req->contig = false;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		/* Allocate higher priority entries for PFs, so that VFs'
		 * entries will be on top of the PF's.
		 */
		if (!is_otx2_vf(pfvf->pcifunc)) {
			req->priority = NPC_MCAM_HIGHER_PRIO;
			req->ref_entry = flow_cfg->def_ent[0];
		}

		/* Send message to AF */
		if (otx2_sync_mbox_msg(&pfvf->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp))
			goto exit;

		for (ent = 0; ent < rsp->count; ent++)
			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		/* If this request is not fulfilled, no need to send
		 * further requests.
		 */
		if (rsp->count != req->count)
			break;
	}

	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in ascending order,
	 * otherwise the user-installed ntuple filter index and the MCAM entry
	 * index will not be in sync.
	 */
	if (allocated)
		sort(&flow_cfg->flow_ent[0], allocated,
		     sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

exit:
	mutex_unlock(&pfvf->mbox.lock);

	flow_cfg->max_flows = allocated;

	if (allocated) {
		pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
	}

	if (allocated != count)
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got only %d\n",
			    count, allocated);
	return allocated;
}
EXPORT_SYMBOL(otx2_alloc_mcam_entries);

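/* Allocate the default MCAM entries for this PF and record their
 * offsets: a block of VF VLAN entries (OTX2_PER_VF_VLAN_FLOWS per VF)
 * first, then the unicast MAC filter entries, then the Rx VLAN
 * offload entry. Ntuple entries are allocated separately afterwards.
 */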
static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int ent, count;

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	count = OTX2_MAX_UNICAST_FLOWS +
			OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;

	flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
					       sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->def_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->contig = false;
	req->count = count;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(rsp);
	}

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
		mutex_unlock(&pfvf->mbox.lock);
		devm_kfree(pfvf->dev, flow_cfg->def_ent);
		return 0;
	}

	for (ent = 0; ent < rsp->count; ent++)
		flow_cfg->def_ent[ent] = rsp->entry_list[ent];

	flow_cfg->vf_vlan_offset = 0;
	flow_cfg->unicast_offset = vf_vlan_max_flows;
	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
					OTX2_MAX_UNICAST_FLOWS;
	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
	pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
	pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	/* Allocate entries for Ntuple filters */
	count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0) {
		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
		return 0;
	}

	pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;

	return 0;
}

int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg;

	pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
				      sizeof(struct otx2_flow_config),
				      GFP_KERNEL);
	if (!pfvf->flow_cfg)
		return -ENOMEM;

	flow_cfg = pfvf->flow_cfg;
	INIT_LIST_HEAD(&flow_cfg->flow_list);
	flow_cfg->max_flows = 0;

	return 0;
}
EXPORT_SYMBOL(otx2vf_mcam_flow_init);

int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

	/* Allocate bare minimum number of MCAM entries needed for
	 * unicast and ntuple filters.
	 */
	err = otx2_mcam_entry_init(pf);
	if (err)
		return err;

	/* Check if MCAM entries are allocated or not */
	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return 0;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	otx2_dmacflt_get_max_cnt(pf);

	/* DMAC filters are not allocated */
	if (!pf->flow_cfg->dmacflt_max_flows)
		return 0;

	pf->flow_cfg->bmap_to_dmacindex =
			devm_kzalloc(pf->dev, sizeof(u8) *
				     pf->flow_cfg->dmacflt_max_flows,
				     GFP_KERNEL);

	if (!pf->flow_cfg->bmap_to_dmacindex)
		return -ENOMEM;

	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}
EXPORT_SYMBOL(otx2_mcam_flow_del);

/* On success adds an MCAM entry.
 * On failure enable promiscuous mode.
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* don't have free MCAM entries or uc list is greater than allotted */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* unicast offset starts at 32; entries 0..31 are for ntuple */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->def_ent[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
			  pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(netdev,
			    "Add %pM to CGX/RPM DMAC filters list as well\n",
			    mac);

	return otx2_do_add_macfilter(pf, mac);
}

static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* check if an MCAM entry exists for the given mac */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

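/* Insert the flow into flow_list, keeping the list sorted by
 * ascending rule location.
 */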
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

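/* DMAC filter slots are exposed as rule locations just above the
 * ntuple range, but only once all ntuple entries are in use or a
 * DMAC filter is already installed.
 */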
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
	if (!flow_cfg)
		return 0;

	if (flow_cfg->nr_flows == flow_cfg->max_flows ||
	    bitmap_weight(&flow_cfg->dmacflt_bmap,
			  flow_cfg->dmacflt_max_flows))
		return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
	else
		return flow_cfg->max_flows;
}
EXPORT_SYMBOL(otx2_get_maxflows);

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}

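/* Translate an ethtool IPv4 flow spec into NPC match fields. Only
 * fields with a non-zero mask are programmed into the request.
 */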
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

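/* Same translation for IPv6 flow specs; address masks are tested
 * with ipv6_addr_any() instead of a plain non-zero check.
 */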
static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

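/* Top-level translation of an ethtool flow spec into an NPC install
 * request: handles ETHER_FLOW, the IPv4/IPv6 flow types above, plus
 * FLOW_EXT VLAN and FLOW_MAC_EXT destination MAC matches.
 */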
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_hdr->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		if (fsp->m_ext.vlan_etype)
			return -EINVAL;
		if (fsp->m_ext.vlan_tci) {
			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		/* Not Drop/Direct to queue but use action in default entry */
		if (fsp->m_ext.data[1] &&
		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
			req->op = NIX_RX_ACTION_DEFAULT;
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

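/* A plain ETHER_FLOW rule carrying a valid destination MAC whose
 * action is neither DROP nor a VF redirect is diverted to the
 * CGX/RPM DMAC filter path instead of an MCAM rule.
 */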
static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
					struct ethtool_rx_flow_spec *fsp)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	u64 ring_cookie = fsp->ring_cookie;
	u32 flow_type;

	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
		return false;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);

	/* CGX/RPM block dmac filtering is configured for white listing;
	 * check for an action other than DROP.
	 */
	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
		if (is_zero_ether_addr(eth_mask->h_dest) &&
		    is_valid_ether_addr(eth_hdr->h_dest))
			return true;
	}

	return false;
}

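/* Build and send an npc_install_flow mailbox request for this flow:
 * ring_cookie selects DROP, an RSS context or a unicast queue, and
 * may also encode a target VF.
 */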
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if action of default entry is not
		 * requested by user
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

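/* The first DMAC filter slot (bit 0) is reserved for the PF's own
 * MAC address; it is installed before any user DMAC filter so the
 * PF keeps receiving its own unicast traffic once the CGX/RPM block
 * starts white listing.
 */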
static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
				    struct otx2_flow *flow)
{
	struct otx2_flow *pf_mac;
	struct ethhdr *eth_hdr;

	pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
	if (!pf_mac)
		return -ENOMEM;

	pf_mac->entry = 0;
	pf_mac->dmac_filter = true;
	pf_mac->location = pfvf->flow_cfg->max_flows;
	memcpy(&pf_mac->flow_spec, &flow->flow_spec,
	       sizeof(struct ethtool_rx_flow_spec));
	pf_mac->flow_spec.location = pf_mac->location;

	/* Copy PF mac address */
	eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
	ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);

	/* Install DMAC filter with PF mac address */
	otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);

	otx2_add_flow_to_list(pfvf, pf_mac);
	pfvf->flow_cfg->nr_flows++;
	set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);

	return 0;
}

int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	struct ethhdr *eth_hdr;
	bool new = false;
	int err = 0;
	u32 ring;

	if (!flow_cfg->max_flows) {
		netdev_err(pfvf->netdev,
			   "Ntuple rule count is 0, allocate and retry\n");
		return -EINVAL;
	}

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
		eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* Sync dmac filter table with updated fields */
		if (flow->dmac_filter)
			return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
						   flow->entry);

		if (bitmap_full(&flow_cfg->dmacflt_bmap,
				flow_cfg->dmacflt_max_flows)) {
			netdev_warn(pfvf->netdev,
				    "Can't insert the rule %d as max allowed dmac filters are %d\n",
				    flow->location +
				    flow_cfg->dmacflt_max_flows,
				    flow_cfg->dmacflt_max_flows);
			err = -EINVAL;
			if (new)
				kfree(flow);
			return err;
		}

		/* Install PF mac address to DMAC filter list */
		if (!test_bit(0, &flow_cfg->dmacflt_bmap))
			otx2_add_flow_with_pfmac(pfvf, flow);

		flow->dmac_filter = true;
		flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
						  flow_cfg->dmacflt_max_flows);
		fsp->location = flow_cfg->max_flows + flow->entry;
		flow->flow_spec.location = fsp->location;
		flow->location = fsp->location;

		set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);

	} else {
		if (flow->location >= pfvf->flow_cfg->max_flows) {
			netdev_warn(pfvf->netdev,
				    "Can't insert non dmac ntuple rule at %d, allowed range 0-%d\n",
				    flow->location,
				    flow_cfg->max_flows - 1);
			err = -EINVAL;
		} else {
			flow->entry = flow_cfg->flow_ent[flow->location];
			err = otx2_add_flow_msg(pfvf, flow);
		}
	}

	if (err) {
		if (err == MBOX_MSG_INVALID)
			err = -EINVAL;
		if (new)
			kfree(flow);
		return err;
	}

	/* add the new flow installed to list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

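/* Handle the PF-MAC DMAC filter (entry 0): either remove it from the
 * CGX/RPM block or re-sync it with the netdev's current MAC address.
 */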
static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;
	bool found = false;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter && iter->entry == 0) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			if (req == DMAC_ADDR_DEL) {
				otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
						    0);
				clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
				found = true;
			} else {
				ether_addr_copy(eth_hdr->h_dest,
						pfvf->netdev->dev_addr);
				otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
			}
			break;
		}
	}

	if (found) {
		list_del(&iter->list);
		kfree(iter);
		pfvf->flow_cfg->nr_flows--;
	}
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	if (flow->dmac_filter) {
		struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* the user is not allowed to remove the dmac filter
		 * carrying the interface's mac address
		 */
		if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
			return -EPERM;

		err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
					  flow->entry);
		clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		/* If all dmac filters are removed, delete the macfilter with
		 * the interface mac address and configure the CGX/RPM block
		 * in promiscuous mode
		 */
		if (bitmap_weight(&flow_cfg->dmacflt_bmap,
				  flow_cfg->dmacflt_max_flows) == 1)
			otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
	} else {
		err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	}

	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d\n",
				    flow->location, err);
	}
}

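/* Delete all installed ntuple rules with a single request covering
 * the whole [first, last] MCAM entry range (flow_ent[] is sorted).
 */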
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->flow_ent[0];
	req->end   = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

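/* Install the Rx VLAN offload MCAM rule: match the PF's DMAC plus
 * any outer VLAN and tag it for VTAG type 0 processing (the actual
 * strip/capture config is done in otx2_enable_rxvlan()).
 */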
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough MCAM entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}

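/* Re-program all software-tracked DMAC filters into the CGX/RPM
 * block, e.g. after the MAC block state has been reset.
 */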
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;

	list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			otx2_dmacflt_add(pf, eth_hdr->h_dest,
					 iter->entry);
		}
	}
}

void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
{
	otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
}