// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <net/ipv6.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

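/* Software state for one ethtool flow rule: the original ethtool spec,
 * its rule slot ('location'), the backing hardware MCAM entry, and the
 * VF / RSS-context targets the rule steers to.
 */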
struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u16 entry;
	bool is_vf;
	u8 rss_ctx_id;
	int vf;
};

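/* Ask the AF (admin function) for MCAM match entries and carve them into
 * per-feature regions. With a full allocation the layout, as derived from
 * the offsets computed below, is:
 *
 *	[vf_vlan_offset] total_vfs * OTX2_PER_VF_VLAN_FLOWS VF VLAN entries
 *	[ntuple_offset]  OTX2_MAX_NTUPLE_FLOWS ntuple entries (shared by TC)
 *	[unicast_offset] OTX2_MAX_UNICAST_FLOWS unicast MAC filters
 *	[rx_vlan_offset] one entry for RX VLAN offload
 *
 * On a short allocation only ntuple/TC filtering is kept enabled.
 */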
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int i;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	req->contig = false;
	req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got %d\n",
			    req->count, rsp->count);
		/* support only ntuples here */
		flow_cfg->ntuple_max_flows = rsp->count;
		flow_cfg->ntuple_offset = 0;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
		flow_cfg->tc_max_flows = flow_cfg->ntuple_max_flows;
		pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
	} else {
		flow_cfg->vf_vlan_offset = 0;
		flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
						vf_vlan_max_flows;
		flow_cfg->tc_flower_offset = flow_cfg->ntuple_offset;
		flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
						OTX2_MAX_NTUPLE_FLOWS;
		flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
						OTX2_MAX_UNICAST_FLOWS;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
		pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
		pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
		pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
		pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
	}

	for (i = 0; i < rsp->count; i++)
		flow_cfg->entry[i] = rsp->entry_list[i];

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;

	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

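/* One-time flow-steering setup: allocate the flow config state, reserve
 * MCAM entries from the AF and allocate the unicast MAC filter table used
 * by otx2_do_add_macfilter().
 */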
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

	pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;
	pf->flow_cfg->tc_max_flows = pf->flow_cfg->ntuple_max_flows;

	err = otx2_alloc_mcam_entries(pf);
	if (err)
		return err;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}

/* On success adds an mcam entry; on failure promiscuous mode is enabled */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* don't have free mcam entries or uc list is greater than allotted */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* unicast entries follow the ntuple region (entries 0..31) */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->entry[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

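/* otx2_add_macfilter()/otx2_del_macfilter() match the address-sync callback
 * signature used by __dev_uc_sync(). A minimal wiring sketch (assumed to
 * live in the driver's ndo_set_rx_mode path, shown for illustration only):
 *
 *	__dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
 */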
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	return otx2_do_add_macfilter(pf, mac);
}

static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* check if an mcam entry exists for the given mac */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

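/* Keep flow_list sorted by ascending rule location; insertion scans for
 * the first entry with a higher location and links the new flow before it.
 */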
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

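/* Return the stored spec for the rule at 'location' (the read side of
 * ethtool's rule interface, e.g. 'ethtool -n <iface> rule 0' with an
 * illustrative interface name).
 */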
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= pfvf->flow_cfg->ntuple_max_flows)
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = pfvf->flow_cfg->ntuple_max_flows;
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}

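/* Translate the IPv4 half of an ethtool flow spec into NPC match fields.
 * Only fields with a non-zero mask are programmed; anything left unmasked
 * is a don't-care for the classifier.
 */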
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

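/* Build an AF npc_install_flow_req from a generic ethtool_rx_flow_spec.
 * For example, a rule added with (illustrative command line):
 *
 *	ethtool -N <iface> flow-type tcp4 dst-port 80 action 2 loc 1
 *
 * arrives as TCP_V4_FLOW with a dst-port mask and maps onto the
 * NPC_ETYPE, NPC_IPPROTO_TCP and NPC_DPORT_TCP feature bits.
 */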
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_mask->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		if (fsp->m_ext.vlan_etype)
			return -EINVAL;
		if (fsp->m_ext.vlan_tci) {
			if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
				return -EINVAL;
			if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;

			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		/* Not Drop/Direct to queue but use action in default entry */
		if (fsp->m_ext.data[1] &&
		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
			req->op = NIX_RX_ACTION_DEFAULT;
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if action of default entry is not
		 * requested by user
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

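/* ethtool ETHTOOL_SRXCLSRLINS handler. The ring_cookie encodes the action:
 * RX_CLS_FLOW_DISC drops the packet; otherwise the low bits select an RX
 * queue and a non-zero VF field (VF index + 1, extracted in
 * otx2_add_flow_msg() above) redirects the flow to that VF.
 */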
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	bool new = false;
	u32 ring;
	int err;

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= flow_cfg->ntuple_max_flows)
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset +
						flow->location];
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	err = otx2_add_flow_msg(pfvf, flow);
	if (err) {
		if (new)
			kfree(flow);
		return err;
	}

	/* add the newly installed flow to the list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= flow_cfg->ntuple_max_flows)
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

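/* Remove every ntuple rule that targets RSS context 'ctx_id'; called when
 * an RSS context is deleted so no rule keeps steering into a stale group.
 */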
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d",
				    flow->location, err);
	}
}

int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->entry[flow_cfg->ntuple_offset];
	req->end   = flow_cfg->entry[flow_cfg->ntuple_offset +
				      flow_cfg->ntuple_max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

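/* Install the rx_vlan MCAM entry: match VLAN-tagged traffic destined to
 * this interface's MAC and apply the default RX action with VTAG0 capture,
 * so VLAN RX offload (see otx2_enable_rxvlan() below) has an entry to
 * strip against. The exact match semantics depend on the NPC profile.
 */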
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

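/* Toggle NIX RX VLAN tag stripping and capture, e.g. when the
 * NETIF_F_HW_VLAN_CTAG_RX feature flag changes ('ethtool -K <iface>
 * rxvlan on|off', assuming the caller is wired to ndo_set_features).
 * The rx_vlan MCAM flow is installed or removed first, then strip/capture
 * is configured in the NIX via the AF VTAG config message.
 */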
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough mcam entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}