// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <net/ipv6.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u16 entry;
	bool is_vf;
	u8 rss_ctx_id;
	int vf;
	bool dmac_filter;
};

enum dmac_req {
	DMAC_ADDR_UPDATE,
	DMAC_ADDR_DEL
};

static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
{
	devm_kfree(pfvf->dev, flow_cfg->flow_ent);
	flow_cfg->flow_ent = NULL;
	flow_cfg->ntuple_max_flows = 0;
	flow_cfg->tc_max_flows = 0;
}

static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	int ent, err;

	if (!flow_cfg->ntuple_max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	for (ent = 0; ent < flow_cfg->ntuple_max_flows; ent++) {
		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
		if (!req)
			break;

		req->entry = flow_cfg->flow_ent[ent];

		/* Send message to AF to free MCAM entries */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			break;
	}
	mutex_unlock(&pfvf->mbox.lock);
	otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
	return 0;
}
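
/* All AF interactions in this file follow the same synchronous mailbox
 * pattern; a minimal sketch (using only helpers already used here):
 *
 *	mutex_lock(&pfvf->mbox.lock);
 *	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
 *	if (!req)
 *		goto unlock;			// mbox region exhausted
 *	req->entry = entry;			// fill request fields
 *	err = otx2_sync_mbox_msg(&pfvf->mbox);	// send to AF and wait
 *	rsp = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
 *	mutex_unlock(&pfvf->mbox.lock);
 *
 * Request memory lives in the shared mbox region, so it is never freed
 * explicitly; otx2_mbox_reset() reclaims it on error paths.
 */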

static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int ent, allocated = 0;

	/* Free current ones and allocate new ones with requested count */
	otx2_free_ntuple_mcam_entries(pfvf);

	if (!count)
		return 0;

	flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
						sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->flow_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	/* At most NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
	 * allocated in a single request.
	 */
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
		if (!req)
			goto exit;

		req->contig = false;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		/* Allocate higher priority entries for PFs, so that VFs'
		 * entries land above the PF's.
		 */
		if (!is_otx2_vf(pfvf->pcifunc)) {
			req->priority = NPC_MCAM_HIGHER_PRIO;
			req->ref_entry = flow_cfg->def_ent[0];
		}

		/* Send message to AF */
		if (otx2_sync_mbox_msg(&pfvf->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&pfvf->mbox.mbox, 0, &req->hdr);

		for (ent = 0; ent < rsp->count; ent++)
			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		/* If this request is not fulfilled, no need to send
		 * further requests.
		 */
		if (rsp->count != req->count)
			break;
	}

exit:
	mutex_unlock(&pfvf->mbox.lock);

	flow_cfg->ntuple_offset = 0;
	flow_cfg->ntuple_max_flows = allocated;
	flow_cfg->tc_max_flows = allocated;

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;

	if (allocated != count)
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got only %d\n",
			    count, allocated);
	return allocated;
}
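
/* Note: the chunk-size ternary above is equivalent to the more idiomatic
 *
 *	req->count = min_t(u16, count - allocated, NPC_MAX_NONCONTIG_ENTRIES);
 *
 * i.e. each loop iteration requests whatever is still outstanding, capped
 * at the per-message limit.
 */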

int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int ent, count;

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	count = OTX2_MAX_UNICAST_FLOWS +
			OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;

	flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
					       sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->def_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->contig = false;
	req->count = count;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
		mutex_unlock(&pfvf->mbox.lock);
		devm_kfree(pfvf->dev, flow_cfg->def_ent);
		return 0;
	}

	for (ent = 0; ent < rsp->count; ent++)
		flow_cfg->def_ent[ent] = rsp->entry_list[ent];

	flow_cfg->vf_vlan_offset = 0;
	flow_cfg->unicast_offset = vf_vlan_max_flows;
	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
					OTX2_MAX_UNICAST_FLOWS;
	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
	pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
	pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	/* Allocate entries for Ntuple filters */
	count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0) {
		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
		return 0;
	}

	pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;

	return 0;
}
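
/* Resulting layout of the def_ent[] array allocated above:
 *
 *	[0 .. vf_vlan_max_flows - 1]			VF VLAN entries
 *	[unicast_offset .. +OTX2_MAX_UNICAST_FLOWS - 1]	unicast filters
 *	[rx_vlan_offset]				RX VLAN offload entry
 *
 * followed by a separately allocated flow_ent[] array for ntuple/TC rules.
 */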

int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg;
	int count;

	pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
				      sizeof(struct otx2_flow_config),
				      GFP_KERNEL);
	if (!pfvf->flow_cfg)
		return -ENOMEM;

	flow_cfg = pfvf->flow_cfg;
	INIT_LIST_HEAD(&flow_cfg->flow_list);
	flow_cfg->ntuple_max_flows = 0;

	count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(otx2vf_mcam_flow_init);

int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

	err = otx2_alloc_mcam_entries(pf);
	if (err)
		return err;

	/* Check if MCAM entries are allocated or not */
	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return 0;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	otx2_dmacflt_get_max_cnt(pf);

	/* DMAC filters are not allocated */
	if (!pf->flow_cfg->dmacflt_max_flows)
		return 0;

	pf->flow_cfg->bmap_to_dmacindex =
			devm_kzalloc(pf->dev, sizeof(u8) *
				     pf->flow_cfg->dmacflt_max_flows,
				     GFP_KERNEL);

	if (!pf->flow_cfg->bmap_to_dmacindex)
		return -ENOMEM;

	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}
EXPORT_SYMBOL(otx2_mcam_flow_del);

/* On success adds an MCAM entry;
 * on failure promiscuous mode is enabled.
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* No free MCAM entries, or the uc list is larger than what was allotted */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* Unicast offset starts at 32; entries 0..31 are used for ntuple */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->def_ent[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
			  pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(netdev,
			    "Add %pM to CGX/RPM DMAC filters list as well\n",
			    mac);

	return otx2_do_add_macfilter(pf, mac);
}

static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* Check whether an MCAM entry exists for the given mac */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

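/* Number of flow locations exposed to ethtool: the DMAC-filter region
 * sits directly above the ntuple region and (judging by the checks below)
 * is advertised only once the ntuple region is full or at least one DMAC
 * filter is installed.
 */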
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
	if (!flow_cfg)
		return 0;

	if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows ||
	    bitmap_weight(&flow_cfg->dmacflt_bmap,
			  flow_cfg->dmacflt_max_flows))
		return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows;
	else
		return flow_cfg->ntuple_max_flows;
}
EXPORT_SYMBOL(otx2_get_maxflows);

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}
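
/* These two callbacks back the ethtool rule-dump path; e.g. (interface
 * name is illustrative)
 *
 *	ethtool -n eth0			# ETHTOOL_GRXCLSRLCNT/GRXCLSRLALL
 *	ethtool -n eth0 rule 5		# ETHTOOL_GRXCLSRULE
 *
 * reach otx2_get_all_flows() and otx2_get_flow() respectively.
 */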

static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_hdr->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		if (fsp->m_ext.vlan_etype)
			return -EINVAL;
		if (fsp->m_ext.vlan_tci) {
			if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
				return -EINVAL;
			if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;

			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		/* Not Drop/Direct to queue but use action in default entry */
		if (fsp->m_ext.data[1] &&
		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
			req->op = NIX_RX_ACTION_DEFAULT;
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}
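
/* Typical ethtool invocations that end up here (interface name is
 * illustrative):
 *
 *	ethtool -N eth0 flow-type tcp4 dst-port 80 action 2
 *	ethtool -N eth0 flow-type udp6 dst-ip 2001:db8::1 action -1
 *	ethtool -N eth0 flow-type ether proto 0x88f7 action 0
 *
 * The first steers TCP/IPv4 port-80 traffic to RX queue 2, the second
 * drops the matching UDP/IPv6 flow (action -1 == RX_CLS_FLOW_DISC), and
 * the third matches on EtherType only.
 */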

static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
					struct ethtool_rx_flow_spec *fsp)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	u64 ring_cookie = fsp->ring_cookie;
	u32 flow_type;

	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
		return false;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);

	/* CGX/RPM block DMAC filtering is configured for allow-listing,
	 * so check for an action other than DROP
	 */
	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
		if (is_zero_ether_addr(eth_mask->h_dest) &&
		    is_valid_ether_addr(eth_hdr->h_dest))
			return true;
	}

	return false;
}

static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if action of default entry is not
		 * requested by user
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
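
/* Layout of the ethtool ring_cookie decoded above (this is ethtool UAPI,
 * not driver-private): bits 31:0 carry the RX queue and bits 39:32 carry
 * (VF + 1), zero meaning "PF itself". Roughly:
 *
 *	ring = ethtool_get_flow_spec_ring(cookie);	// cookie & 0xffffffff
 *	vf   = ethtool_get_flow_spec_ring_vf(cookie);	// (cookie >> 32) & 0xff
 *
 * which is why vf == 0 above means "no VF redirection".
 */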

static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
				    struct otx2_flow *flow)
{
	struct otx2_flow *pf_mac;
	struct ethhdr *eth_hdr;

	pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
	if (!pf_mac)
		return -ENOMEM;

	pf_mac->entry = 0;
	pf_mac->dmac_filter = true;
	pf_mac->location = pfvf->flow_cfg->ntuple_max_flows;
	memcpy(&pf_mac->flow_spec, &flow->flow_spec,
	       sizeof(struct ethtool_rx_flow_spec));
	pf_mac->flow_spec.location = pf_mac->location;

	/* Copy PF mac address */
	eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
	ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);

	/* Install DMAC filter with PF mac address */
	otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);

	otx2_add_flow_to_list(pfvf, pf_mac);
	pfvf->flow_cfg->nr_flows++;
	set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);

	return 0;
}

int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	struct ethhdr *eth_hdr;
	bool new = false;
	int err = 0;
	u32 ring;

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
		eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* Sync dmac filter table with updated fields */
		if (flow->dmac_filter)
			return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
						   flow->entry);

		if (bitmap_full(&flow_cfg->dmacflt_bmap,
				flow_cfg->dmacflt_max_flows)) {
			netdev_warn(pfvf->netdev,
				    "Can't insert the rule %d as max allowed dmac filters are %d\n",
				    flow->location +
				    flow_cfg->dmacflt_max_flows,
				    flow_cfg->dmacflt_max_flows);
			err = -EINVAL;
			if (new)
				kfree(flow);
			return err;
		}

		/* Install PF mac address to DMAC filter list */
		if (!test_bit(0, &flow_cfg->dmacflt_bmap))
			otx2_add_flow_with_pfmac(pfvf, flow);

		flow->dmac_filter = true;
		flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
						  flow_cfg->dmacflt_max_flows);
		fsp->location = flow_cfg->ntuple_max_flows + flow->entry;
		flow->flow_spec.location = fsp->location;
		flow->location = fsp->location;

		set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);

	} else {
		if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) {
			netdev_warn(pfvf->netdev,
				    "Can't insert non dmac ntuple rule at %d, allowed range 0-%d\n",
				    flow->location,
				    flow_cfg->ntuple_max_flows - 1);
			err = -EINVAL;
		} else {
			flow->entry = flow_cfg->flow_ent[flow->location];
			err = otx2_add_flow_msg(pfvf, flow);
		}
	}

	if (err) {
		if (err == MBOX_MSG_INVALID)
			err = -EINVAL;
		if (new)
			kfree(flow);
		return err;
	}

	/* add the new flow installed to list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}
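
/* Note on rule numbering: DMAC-filter rules are remapped above so that
 * they occupy ethtool locations [ntuple_max_flows ..
 * ntuple_max_flows + dmacflt_max_flows - 1], while ordinary ntuple rules
 * keep locations [0 .. ntuple_max_flows - 1]; otx2_remove_flow() relies
 * on the same split.
 */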

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;
	bool found = false;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter && iter->entry == 0) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			if (req == DMAC_ADDR_DEL) {
				otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
						    0);
				clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
				found = true;
			} else {
				ether_addr_copy(eth_hdr->h_dest,
						pfvf->netdev->dev_addr);
				otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
			}
			break;
		}
	}

	if (found) {
		list_del(&iter->list);
		kfree(iter);
		pfvf->flow_cfg->nr_flows--;
	}
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	if (flow->dmac_filter) {
		struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* user not allowed to remove dmac filter with interface mac */
		if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
			return -EPERM;

		err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
					  flow->entry);
		clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		/* If all DMAC filters are removed, delete the macfilter with
		 * the interface mac address and configure the CGX/RPM block
		 * in promiscuous mode
		 */
		if (bitmap_weight(&flow_cfg->dmacflt_bmap,
				  flow_cfg->dmacflt_max_flows) == 1)
			otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
	} else {
		err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	}

	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d",
				    flow->location, err);
	}
}

int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->flow_ent[0];
	req->end   = flow_cfg->flow_ent[flow_cfg->ntuple_max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough MCAM entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}
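
/* This is typically reached from the netdev feature path, e.g.
 *
 *	ethtool -K eth0 rxvlan on|off
 *
 * which toggles NETIF_F_HW_VLAN_CTAG_RX and makes the AF both
 * install/remove the RX VLAN MCAM rule and (re)configure VTAG0
 * stripping/capture.
 */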

void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;

	list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			otx2_dmacflt_add(pf, eth_hdr->h_dest,
					 iter->entry);
		}
	}
}

void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
{
	otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
}