// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <net/ipv6.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

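/**
 * struct otx2_flow - per-rule state for an ethtool ntuple flow
 * @flow_spec:  copy of the ethtool flow specification for this rule
 * @list:       node in flow_cfg->flow_list, kept sorted by @location
 * @location:   user-visible ethtool rule location
 * @entry:      MCAM entry index backing this rule in hardware
 * @is_vf:      true if the rule redirects traffic to a VF
 * @rss_ctx_id: RSS context to use when FLOW_RSS is set in the spec
 * @vf:         destination VF number, valid when @is_vf is true
 */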
struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u16 entry;
	bool is_vf;
	u8 rss_ctx_id;
	int vf;
};

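/* Allocate, in one shot, all MCAM entries this PF needs from the AF.
 * On a full allocation the entries are partitioned via offsets into
 * flow_cfg->entry as follows:
 *
 *   vf_vlan_offset: total_vfs * OTX2_PER_VF_VLAN_FLOWS per-VF VLAN entries
 *   ntuple_offset:  OTX2_MAX_NTUPLE_FLOWS ntuple entries (shared with tc)
 *   unicast_offset: OTX2_MAX_UNICAST_FLOWS unicast MAC filter entries
 *   rx_vlan_offset: entry used for the RX VLAN offload rule
 *
 * If the AF returns fewer entries than requested, everything is given
 * to ntuple/tc filters and the other features stay disabled.
 */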
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int i;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	req->contig = false;
	req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)
	      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	/* otx2_mbox_get_rsp() returns an ERR_PTR on failure */
	if (IS_ERR(rsp)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(rsp);
	}

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got %d\n",
			    req->count, rsp->count);
		/* allocation fell short; support only ntuple filters */
		flow_cfg->ntuple_max_flows = rsp->count;
		flow_cfg->ntuple_offset = 0;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
		flow_cfg->tc_max_flows = flow_cfg->ntuple_max_flows;
		pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
	} else {
		flow_cfg->vf_vlan_offset = 0;
		flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
						vf_vlan_max_flows;
		flow_cfg->tc_flower_offset = flow_cfg->ntuple_offset;
		flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
						OTX2_MAX_NTUPLE_FLOWS;
		flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
						OTX2_MAX_UNICAST_FLOWS;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
		pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
		pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
		pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
		pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
	}

	for (i = 0; i < rsp->count; i++)
		flow_cfg->entry[i] = rsp->entry_list[i];

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;

	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

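/* Called once during driver init: sets up the flow bookkeeping and the
 * unicast MAC filter table, then requests MCAM entries from the AF.
 */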
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

	pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;
	pf->flow_cfg->tc_max_flows = pf->flow_cfg->ntuple_max_flows;

	err = otx2_alloc_mcam_entries(pf);
	if (err)
		return err;

	pf->mac_table = devm_kcalloc(pf->dev, OTX2_MAX_UNICAST_FLOWS,
				     sizeof(struct otx2_mac_table),
				     GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}

/* On success adds an mcam entry; on failure promiscuous mode is
 * enabled instead.
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* don't have free mcam entries or the uc list exceeds the
	 * allotted number of unicast flows
	 */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* unicast filters start at unicast_offset; the entries before it
	 * (0..unicast_offset - 1) are reserved for ntuple filters
	 */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->entry[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	return otx2_do_add_macfilter(pf, mac);
}

/* Find the mcam entry installed for @mac and mark its table slot free;
 * returns false when no filter for @mac exists.
 */
static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* check if an mcam entry exists for the given mac */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

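/* Insert @flow into flow_cfg->flow_list, which is kept sorted by
 * ascending rule location so rules are reported back in order.
 */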
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

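/* ethtool ETHTOOL_GRXCLSRULE handler: return the stored flow spec (and
 * RSS context, if any) for the rule at @location.
 */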
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= pfvf->flow_cfg->ntuple_max_flows)
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = pfvf->flow_cfg->ntuple_max_flows;
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}

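/* Translate an ethtool IPv4 flow spec into NPC match fields.  A header
 * field is added to the match key only when its mask is non-zero; bits
 * not set in the mask are don't care.
 */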
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

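/* IPv6 counterpart of otx2_prepare_ipv4_flow(); 128-bit address masks
 * are tested with ipv6_addr_any() to see whether a match is requested.
 */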
static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

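/* Build an NPC install request from a generic ethtool flow spec:
 * dispatch on the flow type, then apply the FLOW_EXT (VLAN TCI) and
 * FLOW_MAC_EXT (destination MAC) extensions.
 */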
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_mask->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		if (fsp->m_ext.vlan_etype)
			return -EINVAL;
		if (fsp->m_ext.vlan_tci) {
			if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
				return -EINVAL;
			if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;

			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		/* Not Drop/Direct to queue but use action in default entry */
		if (fsp->m_ext.data[1] &&
		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
			req->op = NIX_RX_ACTION_DEFAULT;
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

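/* Send the install request for @flow to the AF.  The ethtool
 * ring_cookie encodes the destination: RX_CLS_FLOW_DISC means drop,
 * otherwise the low bits select the RX queue and the upper bits carry
 * the target VF number plus one (zero meaning the PF itself).
 */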
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
		} else {
			/* change to unicast only if action of default entry
			 * is not requested by user
			 */
			if (req->op != NIX_RX_ACTION_DEFAULT)
				req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			/* free the allocated msg before bailing out */
			otx2_mbox_reset(&pfvf->mbox.mbox, 0);
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

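/* ETHTOOL_SRXCLSRLINS handler: add or update an ntuple steering rule.
 * For illustration, a command such as
 *
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 2 loc 5
 *
 * arrives here with fs.location == 5 and a ring_cookie selecting RX
 * queue 2; "action -1" maps to RX_CLS_FLOW_DISC, i.e. drop.
 */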
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	bool new = false;
	u32 ring;
	int err;

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= flow_cfg->ntuple_max_flows)
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset +
						flow->location];
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	err = otx2_add_flow_msg(pfvf, flow);
	if (err) {
		if (new)
			kfree(flow);
		return err;
	}

	/* add the newly installed flow to the list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= flow_cfg->ntuple_max_flows)
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

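/* Remove every ntuple rule that steers into RSS context @ctx_id, so no
 * rule is left referencing a context that is being deleted.
 */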
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d\n",
				    flow->location, err);
	}
}

int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->entry[flow_cfg->ntuple_offset];
	req->end   = flow_cfg->entry[flow_cfg->ntuple_offset +
				      flow_cfg->ntuple_max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

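/* Full teardown: delete all installed flows and hand every allocated
 * MCAM entry back to the AF.  Invoked from otx2_mcam_flow_del().
 */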
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

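/* Install the MCAM rule backing RX VLAN offload: match our DMAC plus
 * any outer VLAN tag and select VTAG0 so the tag can be stripped and
 * captured by hardware.
 */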
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

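/* Toggle RX VLAN strip/capture offload: install or delete the matching
 * MCAM flow above, then program the NIX VTAG0 strip/capture
 * configuration accordingly.
 */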
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough mcam entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}