// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <net/ipv6.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u16 entry;
	bool is_vf;
	u8 rss_ctx_id;
	int vf;
};

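/* When the full allocation succeeds, the MCAM entries handed back by the
 * AF are carved up as: VF VLAN entries first, then the ntuple region
 * (shared with tc flower), then unicast filter entries, then the RX VLAN
 * offload entry. If the AF returns fewer entries than requested, only
 * ntuple/tc filters are supported.
 */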
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int i;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	req->contig = false;
	req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
								   0, &req->hdr);
	/* The response may be an error pointer if the AF did not respond */
	if (IS_ERR(rsp)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(rsp);
	}

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got %d\n",
			    req->count, rsp->count);
		/* support only ntuples here */
		flow_cfg->ntuple_max_flows = rsp->count;
		flow_cfg->ntuple_offset = 0;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
		flow_cfg->tc_max_flows = flow_cfg->ntuple_max_flows;
		pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
	} else {
		flow_cfg->vf_vlan_offset = 0;
		flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
						vf_vlan_max_flows;
		flow_cfg->tc_flower_offset = flow_cfg->ntuple_offset;
		flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
						OTX2_MAX_NTUPLE_FLOWS;
		flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
						OTX2_MAX_UNICAST_FLOWS;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
		pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
		pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
		pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
		pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
	}

	for (i = 0; i < rsp->count; i++)
		flow_cfg->entry[i] = rsp->entry_list[i];

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;

	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

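/* Allocate the flow tracking state, reserve MCAM entries via the AF and
 * set up the table used to track installed unicast MAC filters.
 */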
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

	pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;
	pf->flow_cfg->tc_max_flows = pf->flow_cfg->ntuple_max_flows;

	err = otx2_alloc_mcam_entries(pf);
	if (err)
		return err;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}

/* On success this installs an MCAM entry for the MAC filter.
 * On failure the caller falls back to enabling promiscuous mode.
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* No free MCAM entries, or the UC list exceeds what was allotted */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* The unicast region starts at unicast_offset; entries 0..31 hold ntuple filters */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->entry[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	return otx2_do_add_macfilter(pf, mac);
}

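/* Look up the MCAM entry installed for @mac. Note the side effect: the
 * matching table slot is marked unused, i.e. the entry is claimed for
 * deletion by the caller.
 */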
static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* Check whether an MCAM entry exists for the given MAC */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

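/* Insert the flow such that flow_list stays sorted by ascending location */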
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= pfvf->flow_cfg->ntuple_max_flows)
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = pfvf->flow_cfg->ntuple_max_flows;
	while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}

	return err;
}

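/* Translate an ethtool IPv4 flow spec into NPC match fields. Following the
 * "bits not set in mask are don't care" rule, only fields with a nonzero
 * mask are copied into the install request.
 */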
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

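/* IPv6 counterpart of otx2_prepare_ipv4_flow(); address fields whose mask
 * is all-zeroes (ipv6_addr_any) are treated as don't care.
 */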
static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

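/* Convert an ethtool_rx_flow_spec into an NPC flow install request.
 * Returns -EOPNOTSUPP if none of the requested fields can be matched by
 * the NPC profile.
 */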
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_mask->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		if (fsp->m_ext.vlan_etype)
			return -EINVAL;
		if (fsp->m_ext.vlan_tci) {
			if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
				return -EINVAL;
			if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;

			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		/* Not Drop/Direct to queue but use action in default entry */
		if (fsp->m_ext.data[1] &&
		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
			req->op = NIX_RX_ACTION_DEFAULT;
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

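/* Build and send the NPC install request for @flow. The ethtool
 * ring_cookie encodes the action: RX_CLS_FLOW_DISC means drop; otherwise
 * it carries the destination queue and, in its upper bits, (VF + 1) when
 * the rule targets a VF queue.
 */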
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if action of default entry is not
		 * requested by user
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

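/* Entry point for ethtool ntuple rule insertion (ETHTOOL_SRXCLSRLINS).
 * As an illustrative example, a command such as
 *   ethtool -N <iface> flow-type tcp4 src-ip 10.1.1.1 dst-port 80 action 2 loc 5
 * arrives here with fsp->location == 5 and a ring_cookie selecting queue 2.
 */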
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	bool new = false;
	u32 ring;
	int err;

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= flow_cfg->ntuple_max_flows)
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset +
						flow->location];
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	err = otx2_add_flow_msg(pfvf, flow);
	if (err) {
		if (new)
			kfree(flow);
		return err;
	}

	/* add the new flow installed to list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= flow_cfg->ntuple_max_flows)
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete rule %d associated with this rss group, err: %d\n",
				    flow->location, err);
	}
}

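/* Delete all installed ntuple entries with a single ranged delete request,
 * then free the corresponding software flow list.
 */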
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->entry[flow_cfg->ntuple_offset];
	req->end   = flow_cfg->entry[flow_cfg->ntuple_offset +
				      flow_cfg->ntuple_max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

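/* Install the flow that steers VLAN tagged traffic destined to our own
 * DMAC through VTAG_TYPE0, which otx2_enable_rxvlan() configures for tag
 * stripping and capture.
 */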
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

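/* Toggle RX VLAN offload: install (or delete) the VLAN steering flow and
 * configure NIX VTAG_TYPE0 to strip and capture the outer tag accordingly.
 */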
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough MCAM entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}