// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <net/ipv6.h>
#include <linux/sort.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

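/* Software state for one ethtool flow rule: the user-supplied spec,
 * the rule's location as seen by ethtool, the backing MCAM entry and
 * bookkeeping for RSS-context, VF-directed and DMAC-filter rules.
 */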
struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u16 entry;
	bool is_vf;
	u8 rss_ctx_id;
	int vf;
	bool dmac_filter;
};

enum dmac_req {
	DMAC_ADDR_UPDATE,
	DMAC_ADDR_DEL
};

static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
{
	devm_kfree(pfvf->dev, flow_cfg->flow_ent);
	flow_cfg->flow_ent = NULL;
	flow_cfg->ntuple_max_flows = 0;
	flow_cfg->tc_max_flows = 0;
}

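/* Release every MCAM entry reserved for ntuple filters, one AF mailbox
 * request per entry, then reset the local ntuple/tc accounting.
 */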
static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	int ent, err;

	if (!flow_cfg->ntuple_max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	for (ent = 0; ent < flow_cfg->ntuple_max_flows; ent++) {
		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
		if (!req)
			break;

		req->entry = flow_cfg->flow_ent[ent];

		/* Send message to AF to free MCAM entries */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			break;
	}
	mutex_unlock(&pfvf->mbox.lock);
	otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
	return 0;
}

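/* sort() comparator: order MCAM entry indices ascending */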
static int mcam_entry_cmp(const void *a, const void *b)
{
	return *(const u16 *)a - *(const u16 *)b;
}

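/* (Re)build the ntuple MCAM entry pool: free whatever is currently
 * held, then request up to @count new entries from the AF in batches
 * of at most NPC_MAX_NONCONTIG_ENTRIES. Returns the number of entries
 * actually obtained, which may be less than requested.
 */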
static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int ent, allocated = 0;

	/* Free current ones and allocate new ones with requested count */
	otx2_free_ntuple_mcam_entries(pfvf);

	if (!count)
		return 0;

	flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
						sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->flow_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	/* Only a max of NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
	 * allocated in a single request.
	 */
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
		if (!req)
			goto exit;

		req->contig = false;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		/* Allocate higher priority entries for PFs, so that VFs'
		 * entries will be on top of the PF's.
		 */
		if (!is_otx2_vf(pfvf->pcifunc)) {
			req->priority = NPC_MCAM_HIGHER_PRIO;
			req->ref_entry = flow_cfg->def_ent[0];
		}

		/* Send message to AF */
		if (otx2_sync_mbox_msg(&pfvf->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp))
			goto exit;

		for (ent = 0; ent < rsp->count; ent++)
			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		/* If this request is not fulfilled, no need to send
		 * further requests.
		 */
		if (rsp->count != req->count)
			break;
	}

	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in ascending order,
	 * otherwise the user-installed ntuple filter index and MCAM entry
	 * index will not be in sync.
	 */
	if (allocated)
		sort(&flow_cfg->flow_ent[0], allocated,
		     sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

exit:
	mutex_unlock(&pfvf->mbox.lock);

	flow_cfg->ntuple_offset = 0;
	flow_cfg->ntuple_max_flows = allocated;
	flow_cfg->tc_max_flows = allocated;

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;

	if (allocated != count)
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got only %d\n",
			    count, allocated);
	return allocated;
}

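/* Allocate the PF's default MCAM entries in one AF request: per-VF
 * VLAN entries first (vf_vlan_offset), then unicast MAC filter
 * entries (unicast_offset), then the Rx VLAN offload entry
 * (rx_vlan_offset). On success the ntuple filter pool is reserved
 * as well.
 */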
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int ent, count;

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	count = OTX2_MAX_UNICAST_FLOWS +
			OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;

	flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
					       sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->def_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->contig = false;
	req->count = count;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(rsp);
	}

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
		mutex_unlock(&pfvf->mbox.lock);
		devm_kfree(pfvf->dev, flow_cfg->def_ent);
		return 0;
	}

	for (ent = 0; ent < rsp->count; ent++)
		flow_cfg->def_ent[ent] = rsp->entry_list[ent];

	flow_cfg->vf_vlan_offset = 0;
	flow_cfg->unicast_offset = vf_vlan_max_flows;
	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
					OTX2_MAX_UNICAST_FLOWS;
	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
	pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
	pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	/* Allocate entries for Ntuple filters */
	count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0) {
		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
		return 0;
	}

	pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;

	return 0;
}

int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg;
	int count;

	pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
				      sizeof(struct otx2_flow_config),
				      GFP_KERNEL);
	if (!pfvf->flow_cfg)
		return -ENOMEM;

	flow_cfg = pfvf->flow_cfg;
	INIT_LIST_HEAD(&flow_cfg->flow_list);
	flow_cfg->ntuple_max_flows = 0;

	count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(otx2vf_mcam_flow_init);

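/* PF-side flow init: set up the flow config, default and ntuple MCAM
 * entries, the unicast MAC filter table and, when the hardware block
 * advertises them, the CGX/RPM DMAC filter index map.
 */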
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

	err = otx2_alloc_mcam_entries(pf);
	if (err)
		return err;

	/* Check whether MCAM entries were allocated */
	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return 0;

	pf->mac_table = devm_kcalloc(pf->dev, OTX2_MAX_UNICAST_FLOWS,
				     sizeof(struct otx2_mac_table),
				     GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	otx2_dmacflt_get_max_cnt(pf);

	/* DMAC filters are not allocated */
	if (!pf->flow_cfg->dmacflt_max_flows)
		return 0;

	pf->flow_cfg->bmap_to_dmacindex =
			devm_kcalloc(pf->dev, pf->flow_cfg->dmacflt_max_flows,
				     sizeof(u8), GFP_KERNEL);
	if (!pf->flow_cfg->bmap_to_dmacindex)
		return -ENOMEM;

	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}
EXPORT_SYMBOL(otx2_mcam_flow_del);

/* On success adds an MCAM entry.
 * On failure the caller falls back to promiscuous mode.
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* No free MCAM entries, or the UC list exceeds what was allotted */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* Unicast entries start at unicast_offset in def_ent[]; claim the
	 * first free slot in the MAC table.
	 */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->def_ent[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
			  pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(netdev,
			    "Add %pM to CGX/RPM DMAC filters list as well\n",
			    mac);

	return otx2_do_add_macfilter(pf, mac);
}

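/* Look up the MCAM entry installed for @mac. On a hit the table slot
 * is also marked free, so this doubles as the claim step of
 * otx2_del_macfilter().
 */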
static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* Check whether an MCAM entry exists for the given MAC */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

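/* Number of rule locations exposed to ethtool: the ntuple pool, plus
 * the DMAC filter range once DMAC filters are in use or the ntuple
 * pool is fully occupied.
 */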
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
	if (!flow_cfg)
		return 0;

	if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows ||
	    bitmap_weight(&flow_cfg->dmacflt_bmap,
			  flow_cfg->dmacflt_max_flows))
		return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows;
	else
		return flow_cfg->ntuple_max_flows;
}
EXPORT_SYMBOL(otx2_get_maxflows);

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}

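/* Translate the IPv4 part of an ethtool flow spec into NPC match
 * fields. Only fields with a non-zero mask are programmed; for L4
 * flow types the IP protocol is matched implicitly via the flow type.
 */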
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

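/* IPv6 counterpart of otx2_prepare_ipv4_flow(); ipv6_addr_any() on
 * the mask decides whether an address field takes part in the match.
 */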
static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

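/* Build an NPC install request from a generic ethtool flow spec:
 * dispatch on the flow type for L2/L3/L4 fields, then apply the
 * FLOW_EXT (outer VLAN TCI) and FLOW_MAC_EXT (destination MAC)
 * extensions.
 */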
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_hdr->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		if (fsp->m_ext.vlan_etype)
			return -EINVAL;
		if (fsp->m_ext.vlan_tci) {
			if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
				return -EINVAL;
			if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;

			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		/* Not Drop/Direct to queue but use action in default entry */
		if (fsp->m_ext.data[1] &&
		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
			req->op = NIX_RX_ACTION_DEFAULT;
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

static bool otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
					 struct ethtool_rx_flow_spec *fsp)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	u64 ring_cookie = fsp->ring_cookie;
	u32 flow_type;

	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
		return false;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);

	/* CGX/RPM block DMAC filtering is configured for allow-listing,
	 * so only consider rules whose action is neither DROP nor
	 * directed to a VF.
	 */
	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
		if (is_zero_ether_addr(eth_mask->h_dest) &&
		    is_valid_ether_addr(eth_hdr->h_dest))
			return true;
	}

	return false;
}

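/* Install one ntuple rule in hardware: convert the stored ethtool
 * spec into an NPC install request, resolve the ring cookie into a
 * drop/RSS/unicast action (and a target VF, if any) and send it to
 * the AF.
 */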
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if action of default entry is not
		 * requested by user
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

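/* When the first user DMAC filter is installed, also install a filter
 * for the PF's own MAC address at DMAC index 0 so the interface keeps
 * receiving its own traffic.
 */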
static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
				    struct otx2_flow *flow)
{
	struct otx2_flow *pf_mac;
	struct ethhdr *eth_hdr;

	pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
	if (!pf_mac)
		return -ENOMEM;

	pf_mac->entry = 0;
	pf_mac->dmac_filter = true;
	pf_mac->location = pfvf->flow_cfg->ntuple_max_flows;
	memcpy(&pf_mac->flow_spec, &flow->flow_spec,
	       sizeof(struct ethtool_rx_flow_spec));
	pf_mac->flow_spec.location = pf_mac->location;

	/* Copy PF mac address */
	eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
	ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);

	/* Install DMAC filter with PF mac address */
	otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);

	otx2_add_flow_to_list(pfvf, pf_mac);
	pfvf->flow_cfg->nr_flows++;
	set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);

	return 0;
}

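/* Add or update an ethtool ntuple rule (ethtool -U/--config-ntuple),
 * e.g. "ethtool -U eth0 flow-type tcp4 dst-port 80 action 2 loc 0".
 * DMAC-only ETHER_FLOW rules are diverted to the CGX/RPM DMAC filter
 * block; everything else consumes an ntuple MCAM entry.
 */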
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	struct ethhdr *eth_hdr;
	bool new = false;
	int err = 0;
	u32 ring;

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
		eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* Sync dmac filter table with updated fields */
		if (flow->dmac_filter)
			return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
						   flow->entry);

		if (bitmap_full(&flow_cfg->dmacflt_bmap,
				flow_cfg->dmacflt_max_flows)) {
			netdev_warn(pfvf->netdev,
				    "Can't insert the rule %d as max allowed dmac filters are %d\n",
				    flow->location +
				    flow_cfg->dmacflt_max_flows,
				    flow_cfg->dmacflt_max_flows);
			err = -EINVAL;
			if (new)
				kfree(flow);
			return err;
		}

		/* Install PF mac address to DMAC filter list */
		if (!test_bit(0, &flow_cfg->dmacflt_bmap))
			otx2_add_flow_with_pfmac(pfvf, flow);

		flow->dmac_filter = true;
		flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
						  flow_cfg->dmacflt_max_flows);
		fsp->location = flow_cfg->ntuple_max_flows + flow->entry;
		flow->flow_spec.location = fsp->location;
		flow->location = fsp->location;

		set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);

	} else {
		if (flow->location >= flow_cfg->ntuple_max_flows) {
			netdev_warn(pfvf->netdev,
				    "Can't insert non-dmac ntuple rule at %d, allowed range is 0-%d\n",
				    flow->location,
				    flow_cfg->ntuple_max_flows - 1);
			err = -EINVAL;
		} else {
			flow->entry = flow_cfg->flow_ent[flow->location];
			err = otx2_add_flow_msg(pfvf, flow);
		}
	}

	if (err) {
		if (err == MBOX_MSG_INVALID)
			err = -EINVAL;
		if (new)
			kfree(flow);
		return err;
	}

	/* add the newly installed flow to the list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

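/* Update or remove the PF-MAC DMAC filter installed at index 0 by
 * otx2_add_flow_with_pfmac(); DMAC_ADDR_DEL also drops its software
 * flow entry from the list.
 */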
static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;
	bool found = false;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter && iter->entry == 0) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			if (req == DMAC_ADDR_DEL) {
				otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
						    0);
				clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
				found = true;
			} else {
				ether_addr_copy(eth_hdr->h_dest,
						pfvf->netdev->dev_addr);
				otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
			}
			break;
		}
	}

	if (found) {
		list_del(&iter->list);
		kfree(iter);
		pfvf->flow_cfg->nr_flows--;
	}
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	if (flow->dmac_filter) {
		struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* The user is not allowed to remove the DMAC filter for the
		 * interface's own MAC address.
		 */
		if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
			return -EPERM;

		err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
					  flow->entry);
		clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		/* If all dmac filters are removed, delete the macfilter with
		 * the interface MAC address and configure the CGX/RPM block
		 * in promiscuous mode
		 */
		if (bitmap_weight(&flow_cfg->dmacflt_bmap,
				  flow_cfg->dmacflt_max_flows) == 1)
			otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
	} else {
		err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	}

	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

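/* Remove every ntuple rule bound to the RSS context being deleted */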
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d\n",
				    flow->location, err);
	}
}

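/* Delete all installed ntuple rules with a single ranged AF request
 * spanning the whole ntuple entry pool, then drop the software flow
 * list.
 */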
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->flow_ent[0];
	req->end   = flow_cfg->flow_ent[flow_cfg->ntuple_max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

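/* Program the reserved rx_vlan MCAM entry: match the interface MAC
 * plus an outer VLAN tag and mark the tag for VTAG TYPE0 processing.
 */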
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

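/* Toggle Rx VLAN strip/capture offload: install or delete the rx_vlan
 * MCAM flow, then configure the matching NIX VTAG TYPE0 strip/capture
 * settings.
 */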
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough MCAM entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}

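/* Re-program all CGX/RPM DMAC filters from the software flow list,
 * e.g. after the underlying CGX/RPM filter state has been lost or
 * reset.
 */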
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;

	list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
		if (iter->dmac_filter) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			otx2_dmacflt_add(pf, eth_hdr->h_dest,
					 iter->entry);
		}
	}
}

void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
{
	otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
}