// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <net/ipv6.h>
#include <linux/sort.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

static int otx2_mcam_entry_init(struct otx2_nic *pfvf);

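/* Software state for one ethtool ntuple/flow rule.
 * @flow_spec: the ethtool rule as supplied by the user.
 * @location: rule index exposed to ethtool.
 * @entry: NPC MCAM entry (or CGX/RPM DMAC filter index) backing the rule.
 * @rss_ctx_id: RSS context used when the rule carries FLOW_RSS.
 * @is_vf/@vf: set when the rule redirects traffic to a VF.
 * @dmac_filter: true if the rule is installed as a CGX/RPM DMAC filter
 *		 instead of an NPC MCAM entry.
 */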
struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u16 entry;
	bool is_vf;
	u8 rss_ctx_id;
	int vf;
	bool dmac_filter;
};

enum dmac_req {
	DMAC_ADDR_UPDATE,
	DMAC_ADDR_DEL
};

static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
{
	devm_kfree(pfvf->dev, flow_cfg->flow_ent);
	flow_cfg->flow_ent = NULL;
	flow_cfg->max_flows = 0;
}

static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	int ent, err;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	for (ent = 0; ent < flow_cfg->max_flows; ent++) {
		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
		if (!req)
			break;

		req->entry = flow_cfg->flow_ent[ent];

		/* Send message to AF to free MCAM entries */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			break;
	}
	mutex_unlock(&pfvf->mbox.lock);
	otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
	return 0;
}

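/* Comparator for sort(): orders MCAM entry indices ascending so that
 * ethtool rule locations map onto MCAM entries in priority order.
 */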
static int mcam_entry_cmp(const void *a, const void *b)
{
	return *(u16 *)a - *(u16 *)b;
}

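/* Re-allocate the pool of MCAM entries used for ntuple filters. Any
 * previously allocated ntuple entries are freed first, then @count new
 * entries are requested from the AF in batches. Returns the number of
 * entries actually allocated, which may be less than @count.
 *
 * Illustrative use (sketch only; actual call sites may differ):
 *
 *	if (otx2_alloc_mcam_entries(pfvf, wanted) != wanted)
 *		netdev_warn(pfvf->netdev, "got fewer MCAM entries than requested\n");
 */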
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int ent, allocated = 0;

	/* Free current ones and allocate new ones with requested count */
	otx2_free_ntuple_mcam_entries(pfvf);

	if (!count)
		return 0;

	flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
						sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->flow_ent) {
		netdev_err(pfvf->netdev,
			   "%s: Unable to allocate memory for flow entries\n",
			    __func__);
		return -ENOMEM;
	}

	mutex_lock(&pfvf->mbox.lock);

	/* At most NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
	 * allocated in a single request.
	 */
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
		if (!req)
			goto exit;

		req->contig = false;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		/* Allocate higher priority entries for PFs, so that VFs'
		 * entries are placed above the PF's.
		 */
		if (!is_otx2_vf(pfvf->pcifunc)) {
			req->priority = NPC_MCAM_HIGHER_PRIO;
			req->ref_entry = flow_cfg->def_ent[0];
		}

		/* Send message to AF */
		if (otx2_sync_mbox_msg(&pfvf->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
									   0, &req->hdr);
		/* The mailbox may hand back an error pointer instead of a
		 * response; bail out before dereferencing it.
		 */
		if (IS_ERR(rsp))
			goto exit;

		for (ent = 0; ent < rsp->count; ent++)
			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		/* If this request is not fulfilled, no need to send
		 * further requests.
		 */
		if (rsp->count != req->count)
			break;
	}

	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in ascending order,
	 * otherwise the user-installed ntuple filter index and the MCAM entry
	 * index will not be in sync.
	 */
	if (allocated)
		sort(&flow_cfg->flow_ent[0], allocated,
		     sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

exit:
	mutex_unlock(&pfvf->mbox.lock);

	flow_cfg->max_flows = allocated;

	if (allocated) {
		pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
	}

	if (allocated != count)
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got only %d\n",
			    count, allocated);
	return allocated;
}
EXPORT_SYMBOL(otx2_alloc_mcam_entries);

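/* Allocate the default MCAM entries for this PF. The def_ent[] array is
 * laid out as: [0 .. vf_vlan_max_flows - 1] VF VLAN entries, then
 * OTX2_MAX_UNICAST_FLOWS unicast DMAC entries at unicast_offset, then
 * the RX VLAN offload entry at rx_vlan_offset. Ntuple filter entries
 * are allocated separately via otx2_alloc_mcam_entries().
 */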
static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int ent, count;

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	count = OTX2_MAX_UNICAST_FLOWS +
			OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;

	flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
					       sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->def_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->contig = false;
	req->count = count;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
								   0, &req->hdr);
	/* The mailbox may return an error pointer; don't dereference it */
	if (IS_ERR(rsp)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(rsp);
	}

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
		mutex_unlock(&pfvf->mbox.lock);
		devm_kfree(pfvf->dev, flow_cfg->def_ent);
		return 0;
	}

	for (ent = 0; ent < rsp->count; ent++)
		flow_cfg->def_ent[ent] = rsp->entry_list[ent];

	flow_cfg->vf_vlan_offset = 0;
	flow_cfg->unicast_offset = vf_vlan_max_flows;
	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
					OTX2_MAX_UNICAST_FLOWS;
	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
	pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
	pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	/* Allocate entries for Ntuple filters */
	count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0) {
		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
		return 0;
	}

	pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;

	return 0;
}

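/* VF counterpart of otx2_mcam_flow_init(): VFs reserve no default
 * unicast/VLAN entries; MCAM entries for ntuple filters are allocated
 * later via otx2_alloc_mcam_entries().
 */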
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg;

	pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
				      sizeof(struct otx2_flow_config),
				      GFP_KERNEL);
	if (!pfvf->flow_cfg)
		return -ENOMEM;

	flow_cfg = pfvf->flow_cfg;
	INIT_LIST_HEAD(&flow_cfg->flow_list);
	flow_cfg->max_flows = 0;

	return 0;
}
EXPORT_SYMBOL(otx2vf_mcam_flow_init);

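/* PF flow-configuration init: reserve the default MCAM entries, set up
 * the software unicast MAC table and, if the hardware exposes CGX/RPM
 * DMAC filters, the bitmap mapping rule slots to DMAC filter indices.
 */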
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

	/* Allocate bare minimum number of MCAM entries needed for
	 * unicast and ntuple filters.
	 */
	err = otx2_mcam_entry_init(pf);
	if (err)
		return err;

	/* Check whether MCAM entries were allocated */
	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return 0;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	otx2_dmacflt_get_max_cnt(pf);

	/* DMAC filters are not allocated */
	if (!pf->flow_cfg->dmacflt_max_flows)
		return 0;

	pf->flow_cfg->bmap_to_dmacindex =
			devm_kzalloc(pf->dev, sizeof(u8) *
				     pf->flow_cfg->dmacflt_max_flows,
				     GFP_KERNEL);

	if (!pf->flow_cfg->bmap_to_dmacindex)
		return -ENOMEM;

	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}
EXPORT_SYMBOL(otx2_mcam_flow_del);

/* On success adds an MCAM entry.
 * On failure promiscuous mode is enabled.
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* No free MCAM entries, or the UC list exceeds what is allotted */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* Unicast entries start at offset 32; entries 0..31 are for ntuple */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->def_ent[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
			  pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(netdev,
			    "Add %pM to CGX/RPM DMAC filters list as well\n",
			    mac);

	return otx2_do_add_macfilter(pf, mac);
}

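/* Look up the MCAM entry installed for @mac. Note the side effect: a
 * matching mac_table slot is also marked unused, so this lookup doubles
 * as removal from the software table.
 */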
static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* Check whether an MCAM entry exists for the given MAC */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

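/* Insert @flow into flow_list, which is kept sorted by ascending rule
 * location.
 */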
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
	if (!flow_cfg)
		return 0;

	if (flow_cfg->nr_flows == flow_cfg->max_flows ||
	    bitmap_weight(&flow_cfg->dmacflt_bmap,
			  flow_cfg->dmacflt_max_flows))
		return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
	else
		return flow_cfg->max_flows;
}
EXPORT_SYMBOL(otx2_get_maxflows);

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}

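/* Translate an ethtool IPv4 flow spec into an NPC install request.
 * Convention throughout: a nonzero mask field means "match this field",
 * so the value and mask are copied into the request and the matching
 * NPC feature bit is set in req->features.
 */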
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

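/* Convert a complete ethtool_rx_flow_spec (L2, IPv4 or IPv6, plus the
 * FLOW_EXT/FLOW_MAC_EXT extensions) into an NPC install request.
 * Returns -EOPNOTSUPP for flow types or fields the NPC profile cannot
 * match on.
 */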
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_hdr->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		if (fsp->m_ext.vlan_etype)
			return -EINVAL;
		if (fsp->m_ext.vlan_tci) {
			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		/* Neither drop nor direct-to-queue: use the action from the
		 * default entry instead
		 */
		if (fsp->m_ext.data[1] &&
		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
			req->op = NIX_RX_ACTION_DEFAULT;
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
					struct ethtool_rx_flow_spec *fsp)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	u64 ring_cookie = fsp->ring_cookie;
	u32 flow_type;

	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
		return false;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);

	/* CGX/RPM block DMAC filtering is configured for whitelisting;
	 * check for any action other than DROP
	 */
	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
		if (is_zero_ether_addr(eth_mask->h_dest) &&
		    is_valid_ether_addr(eth_hdr->h_dest))
			return true;
	}

	return false;
}

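/* Install one flow via the AF mailbox. The action is derived from the
 * ethtool ring_cookie: RX_CLS_FLOW_DISC maps to a drop action, FLOW_RSS
 * rules map to an RSS action with the rule's RSS context, and anything
 * else becomes a unicast steer to the given queue. A VF destination is
 * encoded in ring_cookie as (VF index + 1).
 */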
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* Switch to unicast only if the user did not request the
		 * default entry's action
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}
	}

	/* ethtool encodes a VF destination in ring_cookie as (VF + 1) */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
				    struct otx2_flow *flow)
{
	struct otx2_flow *pf_mac;
	struct ethhdr *eth_hdr;

	pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
	if (!pf_mac)
		return -ENOMEM;

	pf_mac->entry = 0;
	pf_mac->dmac_filter = true;
	pf_mac->location = pfvf->flow_cfg->max_flows;
	memcpy(&pf_mac->flow_spec, &flow->flow_spec,
	       sizeof(struct ethtool_rx_flow_spec));
	pf_mac->flow_spec.location = pf_mac->location;

	/* Copy PF mac address */
	eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
	ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);

	/* Install DMAC filter with PF mac address */
	otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);

	otx2_add_flow_to_list(pfvf, pf_mac);
	pfvf->flow_cfg->nr_flows++;
	set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);

	return 0;
}

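/* Add or update an ntuple rule. Plain DMAC rules are installed in the
 * CGX/RPM DMAC filter table when available and are exposed to ethtool
 * at locations max_flows..max_flows + dmacflt_max_flows - 1; all other
 * rules consume an NPC MCAM entry from flow_ent[].
 */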
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	struct ethhdr *eth_hdr;
	bool new = false;
	int err = 0;
	u32 ring;

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
		eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* Sync dmac filter table with updated fields */
		if (flow->dmac_filter)
			return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
						   flow->entry);

		if (bitmap_full(&flow_cfg->dmacflt_bmap,
				flow_cfg->dmacflt_max_flows)) {
			netdev_warn(pfvf->netdev,
				    "Can't insert the rule %d as max allowed dmac filters are %d\n",
				    flow->location +
				    flow_cfg->dmacflt_max_flows,
				    flow_cfg->dmacflt_max_flows);
			err = -EINVAL;
			if (new)
				kfree(flow);
			return err;
		}

		/* Install PF mac address to DMAC filter list */
		if (!test_bit(0, &flow_cfg->dmacflt_bmap))
			otx2_add_flow_with_pfmac(pfvf, flow);

		flow->dmac_filter = true;
		flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
						  flow_cfg->dmacflt_max_flows);
		fsp->location = flow_cfg->max_flows + flow->entry;
		flow->flow_spec.location = fsp->location;
		flow->location = fsp->location;

		set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);

	} else {
		if (flow->location >= pfvf->flow_cfg->max_flows) {
			netdev_warn(pfvf->netdev,
				    "Can't insert non dmac ntuple rule at %d, allowed range 0-%d\n",
				    flow->location,
				    flow_cfg->max_flows - 1);
			err = -EINVAL;
		} else {
			flow->entry = flow_cfg->flow_ent[flow->location];
			err = otx2_add_flow_msg(pfvf, flow);
		}
	}

	if (err) {
		if (err == MBOX_MSG_INVALID)
			err = -EINVAL;
		if (new)
			kfree(flow);
		return err;
	}

	/* Add the newly installed flow to the flow list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

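/* DMAC filter entry 0 always holds the PF's own MAC address. On
 * DMAC_ADDR_DEL remove that filter and drop its list node; on
 * DMAC_ADDR_UPDATE refresh it with the current netdev MAC.
 */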
static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;
	bool found = false;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter && iter->entry == 0) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			if (req == DMAC_ADDR_DEL) {
				otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
						    0);
				clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
				found = true;
			} else {
				ether_addr_copy(eth_hdr->h_dest,
						pfvf->netdev->dev_addr);
				otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
			}
			break;
		}
	}

	if (found) {
		list_del(&iter->list);
		kfree(iter);
		pfvf->flow_cfg->nr_flows--;
	}
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	if (flow->dmac_filter) {
		struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* The user is not allowed to remove the DMAC filter that
		 * carries the interface's own MAC address
		 */
		if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
			return -EPERM;

		err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
					  flow->entry);
		clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		/* If all dmac filters are removed delete macfilter with
		 * interface mac address and configure CGX/RPM block in
		 * promiscuous mode
		 */
		if (bitmap_weight(&flow_cfg->dmacflt_bmap,
				  flow_cfg->dmacflt_max_flows) == 1)
			otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
	} else {
		err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	}

	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d",
				    flow->location, err);
	}
}

int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->flow_ent[0];
	req->end   = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

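/* Toggle RX VLAN offload: install or delete the MCAM flow that matches
 * VLAN-tagged traffic to the PF MAC, then configure VTAG stripping and
 * capture through a NIX_AF vtag config message.
 */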
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough MCAM entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}

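/* Re-program every software-tracked DMAC filter into the CGX/RPM
 * filter table, presumably after the hardware table has been reset
 * (e.g. on an interface reattach).
 */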
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;

	list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			otx2_dmacflt_add(pf, eth_hdr->h_dest,
					 iter->entry);
		}
	}
}

void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
{
	otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
}