// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <net/ipv6.h>
#include <linux/sort.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

static int otx2_mcam_entry_init(struct otx2_nic *pfvf);

struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u16 entry;
	bool is_vf;
	u8 rss_ctx_id;
	int vf;
	bool dmac_filter;
};

enum dmac_req {
	DMAC_ADDR_UPDATE,
	DMAC_ADDR_DEL
};

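/* Drop the ntuple MCAM entry bookkeeping once the entries themselves
 * have been (or are being) returned to the AF: free the flow_ent[]
 * array and reset max_flows so the ntuple path sees no capacity.
 */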
static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf,
					struct otx2_flow_config *flow_cfg)
{
	devm_kfree(pfvf->dev, flow_cfg->flow_ent);
	flow_cfg->flow_ent = NULL;
	flow_cfg->max_flows = 0;
}

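/* Return all ntuple MCAM entries to the AF (Admin Function), one
 * mailbox message per entry, then clear the local flow state. A mailbox
 * failure aborts the loop early, but the local state is cleared anyway.
 */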
static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	int ent, err;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	for (ent = 0; ent < flow_cfg->max_flows; ent++) {
		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
		if (!req)
			break;

		req->entry = flow_cfg->flow_ent[ent];

		/* Send message to AF to free MCAM entries */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			break;
	}
	mutex_unlock(&pfvf->mbox.lock);
	otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
	return 0;
}

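/* sort() comparator: orders MCAM entry indices ascending */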
static int mcam_entry_cmp(const void *a, const void *b)
{
	return *(u16 *)a - *(u16 *)b;
}

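/* Allocate @count MCAM entries for ntuple filters via the AF mailbox,
 * releasing any previously allocated set first. Returns the number of
 * entries actually obtained, which may be less than requested.
 */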
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int ent, allocated = 0;

	/* Free current ones and allocate new ones with requested count */
	otx2_free_ntuple_mcam_entries(pfvf);

	if (!count)
		return 0;

	flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
						sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->flow_ent) {
		netdev_err(pfvf->netdev,
			   "%s: Unable to allocate memory for flow entries\n",
			   __func__);
		return -ENOMEM;
	}

	mutex_lock(&pfvf->mbox.lock);

	/* At most NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
	 * allocated in a single request.
	 */
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
		if (!req)
			goto exit;

		req->contig = false;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		/* Allocate higher priority entries for PFs, so that VFs'
		 * entries will be on top of PF entries.
		 */
		if (!is_otx2_vf(pfvf->pcifunc)) {
			req->priority = NPC_MCAM_HIGHER_PRIO;
			req->ref_entry = flow_cfg->def_ent[0];
		}

		/* Send message to AF */
		if (otx2_sync_mbox_msg(&pfvf->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&pfvf->mbox.mbox, 0, &req->hdr);

		for (ent = 0; ent < rsp->count; ent++)
			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		/* If this request is not fulfilled, no need to send
		 * further requests.
		 */
		if (rsp->count != req->count)
			break;
	}

	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in ascending order,
	 * otherwise the user-installed ntuple filter index and the MCAM entry
	 * index will not be in sync.
	 */
	if (allocated)
		sort(&flow_cfg->flow_ent[0], allocated,
		     sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

exit:
	mutex_unlock(&pfvf->mbox.lock);

	flow_cfg->max_flows = allocated;

	if (allocated) {
		pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
	}

	if (allocated != count)
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got only %d\n",
			    count, allocated);
	return allocated;
}
EXPORT_SYMBOL(otx2_alloc_mcam_entries);

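/* Allocate the default MCAM entries (VF VLAN, unicast and Rx VLAN
 * filters) in one request, record their offsets within def_ent[], and
 * then allocate a default pool of ntuple entries. Feature flags are
 * set only for what was actually obtained.
 */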
static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int ent, count;

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	count = OTX2_MAX_UNICAST_FLOWS +
			OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;

	flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
					       sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->def_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->contig = false;
	req->count = count;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
		mutex_unlock(&pfvf->mbox.lock);
		devm_kfree(pfvf->dev, flow_cfg->def_ent);
		return 0;
	}

	for (ent = 0; ent < rsp->count; ent++)
		flow_cfg->def_ent[ent] = rsp->entry_list[ent];

	flow_cfg->vf_vlan_offset = 0;
	flow_cfg->unicast_offset = vf_vlan_max_flows;
	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
					OTX2_MAX_UNICAST_FLOWS;
	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
	pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
	pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	/* Allocate entries for Ntuple filters */
	count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0) {
		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
		return 0;
	}

	pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;

	return 0;
}

int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg;

	pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
				      sizeof(struct otx2_flow_config),
				      GFP_KERNEL);
	if (!pfvf->flow_cfg)
		return -ENOMEM;

	flow_cfg = pfvf->flow_cfg;
	INIT_LIST_HEAD(&flow_cfg->flow_list);
	flow_cfg->max_flows = 0;

	return 0;
}
EXPORT_SYMBOL(otx2vf_mcam_flow_init);

int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

	/* Allocate bare minimum number of MCAM entries needed for
	 * unicast and ntuple filters.
	 */
	err = otx2_mcam_entry_init(pf);
	if (err)
		return err;

	/* Check if MCAM entries are allocated or not */
	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return 0;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	otx2_dmacflt_get_max_cnt(pf);

	/* DMAC filters are not allocated */
	if (!pf->flow_cfg->dmacflt_max_flows)
		return 0;

	pf->flow_cfg->bmap_to_dmacindex =
			devm_kzalloc(pf->dev, sizeof(u8) *
				     pf->flow_cfg->dmacflt_max_flows,
				     GFP_KERNEL);

	if (!pf->flow_cfg->bmap_to_dmacindex)
		return -ENOMEM;

	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}
EXPORT_SYMBOL(otx2_mcam_flow_del);

/* On success adds an MCAM entry
 * On failure enables promiscuous mode
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* don't have free MCAM entries or the UC list exceeds the allotment */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* unicast offset starts at 32; entries 0..31 are for ntuple */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->def_ent[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
			  pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(netdev,
			    "Add %pM to CGX/RPM DMAC filters list as well\n",
			    mac);

	return otx2_do_add_macfilter(pf, mac);
}

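/* Look up the MCAM entry installed for @mac in the unicast MAC table;
 * on a match the table slot is released and the entry index is
 * returned through @mcam_entry.
 */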
static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* check whether an MCAM entry exists for the given MAC */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

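/* Insert @flow into the flow list, which is kept sorted by ascending
 * rule location so ethtool rule dumps come out in order.
 */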
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

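/* Maximum rule locations exposed to ethtool: the DMAC filter range is
 * appended after the ntuple range once the ntuple range is full or any
 * CGX/RPM DMAC filter is in use.
 */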
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
	if (!flow_cfg)
		return 0;

	if (flow_cfg->nr_flows == flow_cfg->max_flows ||
	    bitmap_weight(&flow_cfg->dmacflt_bmap,
			  flow_cfg->dmacflt_max_flows))
		return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
	else
		return flow_cfg->max_flows;
}
EXPORT_SYMBOL(otx2_get_maxflows);

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}

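/* Translate the IPv4 portion of an ethtool flow spec into NPC match
 * fields: copy header/mask pairs into the install request and set the
 * corresponding NPC_* feature bits for each field the mask selects.
 */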
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

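/* IPv6 counterpart of otx2_prepare_ipv4_flow(): all-zero IPv6 masks
 * (ipv6_addr_any()) mean "don't care" for the address fields.
 */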
static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

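/* Convert a complete ethtool_rx_flow_spec into an npc_install_flow_req:
 * Ethernet header fields are handled here, L3/L4 fields are delegated
 * to the IPv4/IPv6 helpers, and FLOW_EXT/FLOW_MAC_EXT extensions are
 * applied last. Fails with -EOPNOTSUPP if nothing matchable was set.
 */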
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_hdr->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		if (fsp->m_ext.vlan_etype)
			return -EINVAL;
		if (fsp->m_ext.vlan_tci) {
			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		/* Not Drop/Direct to queue but use action in default entry */
		if (fsp->m_ext.data[1] &&
		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
			req->op = NIX_RX_ACTION_DEFAULT;
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
					struct ethtool_rx_flow_spec *fsp)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	u64 ring_cookie = fsp->ring_cookie;
	u32 flow_type;

	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
		return false;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);

	/* CGX/RPM block DMAC filtering is configured for whitelisting;
	 * check for an action other than DROP
	 */
	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
		if (is_zero_ether_addr(eth_mask->h_dest) &&
		    is_valid_ether_addr(eth_hdr->h_dest))
			return true;
	}

	return false;
}

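/* Build and send the NPC install request for @flow: the ring_cookie
 * selects DROP, an RSS context or a unicast queue, and a (VF + 1)
 * encoding in the cookie steers the rule to that VF instead.
 */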
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if action of default entry is not
		 * requested by user
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
			req->flow_key_alg = pfvf->hw.flowkey_alg_idx;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

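/* When the first DMAC filter rule is added, also install a companion
 * rule for the PF's own MAC address (bit 0 of dmacflt_bmap) so that
 * traffic to the interface address keeps flowing in whitelist mode.
 */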
static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
				    struct otx2_flow *flow)
{
	struct otx2_flow *pf_mac;
	struct ethhdr *eth_hdr;

	pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
	if (!pf_mac)
		return -ENOMEM;

	pf_mac->entry = 0;
	pf_mac->dmac_filter = true;
	pf_mac->location = pfvf->flow_cfg->max_flows;
	memcpy(&pf_mac->flow_spec, &flow->flow_spec,
	       sizeof(struct ethtool_rx_flow_spec));
	pf_mac->flow_spec.location = pf_mac->location;

	/* Copy PF mac address */
	eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
	ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);

	/* Install DMAC filter with PF mac address */
	otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);

	otx2_add_flow_to_list(pfvf, pf_mac);
	pfvf->flow_cfg->nr_flows++;
	set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);

	return 0;
}

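/* ethtool -N/-U entry point: validate the requested location and ring,
 * then install the rule either as a CGX/RPM DMAC filter (pure DMAC
 * match) or as a regular NPC MCAM ntuple entry.
 */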
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	struct ethhdr *eth_hdr;
	bool new = false;
	int err = 0;
	u32 ring;

	if (!flow_cfg->max_flows) {
		netdev_err(pfvf->netdev,
			   "Ntuple rule count is 0, allocate and retry\n");
		return -EINVAL;
	}

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
		eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* Sync dmac filter table with updated fields */
		if (flow->dmac_filter)
			return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
						   flow->entry);

		if (bitmap_full(&flow_cfg->dmacflt_bmap,
				flow_cfg->dmacflt_max_flows)) {
			netdev_warn(pfvf->netdev,
				    "Can't insert the rule %d as max allowed dmac filters are %d\n",
				    flow->location +
				    flow_cfg->dmacflt_max_flows,
				    flow_cfg->dmacflt_max_flows);
			err = -EINVAL;
			if (new)
				kfree(flow);
			return err;
		}

		/* Install PF mac address to DMAC filter list */
		if (!test_bit(0, &flow_cfg->dmacflt_bmap))
			otx2_add_flow_with_pfmac(pfvf, flow);

		flow->dmac_filter = true;
		flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
						  flow_cfg->dmacflt_max_flows);
		fsp->location = flow_cfg->max_flows + flow->entry;
		flow->flow_spec.location = fsp->location;
		flow->location = fsp->location;

		set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);

	} else {
		if (flow->location >= pfvf->flow_cfg->max_flows) {
			netdev_warn(pfvf->netdev,
				    "Can't insert non dmac ntuple rule at %d, allowed range 0-%d\n",
				    flow->location,
				    flow_cfg->max_flows - 1);
			err = -EINVAL;
		} else {
			flow->entry = flow_cfg->flow_ent[flow->location];
			err = otx2_add_flow_msg(pfvf, flow);
		}
	}

	if (err) {
		if (err == MBOX_MSG_INVALID)
			err = -EINVAL;
		if (new)
			kfree(flow);
		return err;
	}

	/* add the new flow installed to list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

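/* Update or remove the implicit PF-MAC DMAC filter (entry 0) that was
 * installed alongside the first user DMAC rule. DMAC_ADDR_UPDATE
 * re-syncs it with the current netdev address; DMAC_ADDR_DEL drops it
 * and releases its list element.
 */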
static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;
	bool found = false;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter && iter->entry == 0) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			if (req == DMAC_ADDR_DEL) {
				otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
						    0);
				clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
				found = true;
			} else {
				ether_addr_copy(eth_hdr->h_dest,
						pfvf->netdev->dev_addr);
				otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
			}
			break;
		}
	}

	if (found) {
		list_del(&iter->list);
		kfree(iter);
		pfvf->flow_cfg->nr_flows--;
	}
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	if (flow->dmac_filter) {
		struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* user is not allowed to remove the dmac filter for the
		 * interface MAC address
		 */
		if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
			return -EPERM;

		err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
					  flow->entry);
		clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		/* If all dmac filters are removed delete macfilter with
		 * interface mac address and configure CGX/RPM block in
		 * promiscuous mode
		 */
		if (bitmap_weight(&flow_cfg->dmacflt_bmap,
				  flow_cfg->dmacflt_max_flows) == 1)
			otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
	} else {
		err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	}

	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d\n",
				    flow->location, err);
	}
}

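/* Delete every installed ntuple rule in one ranged mailbox request and
 * free the corresponding software flow list. The MCAM entries stay
 * allocated to this PF/VF for reuse.
 */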
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->flow_ent[0];
	req->end   = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

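/* Toggle Rx VLAN offload: install or delete the VLAN match flow, then
 * configure VTAG0 strip/capture accordingly via the NIX VTAG config
 * mailbox message.
 */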
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough MCAM entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}

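/* Re-program all software-tracked DMAC filters into the CGX/RPM block,
 * e.g. after the MAC block lost its state (assumed caller context:
 * interface reset/up path).
 */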
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;

	list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			otx2_dmacflt_add(pf, eth_hdr->h_dest,
					 iter->entry);
		}
	}
}

void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
{
	otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
}