// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <net/ipv6.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u16 entry;
	bool is_vf;
	u8 rss_ctx_id;
	int vf;
};

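/* Ask the AF to allocate MCAM entries for all flow offload features.
 * On a full allocation the entry space is laid out as:
 *
 *   [ VF VLAN flows | ntuple flows | unicast filters | rx VLAN flow ]
 *
 * If the AF hands back fewer entries than requested, everything that
 * was returned is given to ntuple filters and the other features stay
 * disabled.
 */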
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int i;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	req->contig = false;
	req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)
		otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got %d\n",
			    req->count, rsp->count);
		/* support only ntuples here */
		flow_cfg->ntuple_max_flows = rsp->count;
		flow_cfg->ntuple_offset = 0;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
	} else {
		flow_cfg->vf_vlan_offset = 0;
		flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
						vf_vlan_max_flows;
		flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
						OTX2_MAX_NTUPLE_FLOWS;
		flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
						OTX2_MAX_UNICAST_FLOWS;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
		pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
		pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
		pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
	}

	for (i = 0; i < rsp->count; i++)
		flow_cfg->entry[i] = rsp->entry_list[i];

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;

	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

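/* One-time flow offload setup: allocate the flow config state, the
 * MCAM entries and the software unicast MAC filter table.
 */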
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

	pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;

	err = otx2_alloc_mcam_entries(pf);
	if (err)
		return err;

	pf->mac_table = devm_kcalloc(pf->dev, OTX2_MAX_UNICAST_FLOWS,
				     sizeof(struct otx2_mac_table),
				     GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}

/* On success adds mcam entry
 * On failure enable promiscuous mode
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* don't have free mcam entries or uc list is greater than allotted */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* unicast filter entries follow the ntuple entries in the
	 * MCAM allocation
	 */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->entry[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	return otx2_do_add_macfilter(pf, mac);
}

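/* Look up the MCAM entry installed for @mac. Note the side effect: a
 * successful lookup also marks the mac_table slot free, so this is only
 * meant to be called from the delete path below.
 */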
static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* check if an mcam entry exists for the given mac */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

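/* Insert @flow into flow_list, keeping the list sorted by ascending
 * location so rule dumps come out in rule-number order.
 */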
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= pfvf->flow_cfg->ntuple_max_flows)
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

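/* ETHTOOL_GRXCLSRLALL handler: walk rule locations until nfc->rule_cnt
 * locations have been collected, skipping holes (-ENOENT) in between.
 */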
int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = pfvf->flow_cfg->ntuple_max_flows;
	while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}

	return err;
}

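/* Translate an ethtool IPv4 flow spec into NPC match fields. Only the
 * fields with a non-zero mask are copied into the request; each copied
 * field is advertised to the AF via the matching NPC_* feature bit.
 */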
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tos & ah_esp_hdr->tos))
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

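/* IPv6 counterpart of otx2_prepare_ipv4_flow(); addresses are compared
 * against the all-zero mask with ipv6_addr_any() instead of a plain
 * integer test.
 */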
static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

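/* Build an npc_install_flow_req from a generic ethtool flow spec.
 * For example (illustrative only), a rule added with:
 *
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 2
 *
 * arrives here as a TCP_V4_FLOW spec with only m_u.tcp_ip4_spec.pdst
 * masked, and leaves as a request matching ETH_P_IP + IPPROTO_TCP +
 * dport 80, i.e. NPC_ETYPE | NPC_IPPROTO_TCP | NPC_DPORT_TCP.
 */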
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_mask->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		if (fsp->m_ext.vlan_etype)
			return -EINVAL;
		if (fsp->m_ext.vlan_tci) {
			if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
				return -EINVAL;
			if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;

			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		/* Neither drop nor direct to a queue; use the action of the
		 * default entry instead.
		 */
		if (fsp->m_ext.data[1] &&
		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
			req->op = NIX_RX_ACTION_DEFAULT;
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

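/* Install one flow in hardware. The ethtool ring_cookie packs the
 * target queue in bits 31:0 and (VF index + 1) in bits 39:32, so e.g.
 * a cookie of 0x100000002 steers to queue 2 of VF0. RX_CLS_FLOW_DISC
 * maps to a drop action, and FLOW_RSS rules spread over the RSS
 * context saved in flow->rss_ctx_id.
 */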
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if action of default entry is not
		 * requested by user
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

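/* ETHTOOL_SRXCLSRLINS handler: validate the rule, reuse the existing
 * otx2_flow for an in-place update or allocate a new one, then push it
 * to the AF. A new flow is only linked into flow_list once the AF has
 * accepted it.
 */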
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	bool new = false;
	u32 ring;
	int err;

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= flow_cfg->ntuple_max_flows)
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset +
						flow->location];
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	err = otx2_add_flow_msg(pfvf, flow);
	if (err) {
		if (new)
			kfree(flow);
		return err;
	}

	/* add the newly installed flow to the list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}

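/* Ask the AF to delete a single MCAM entry, or every entry owned by
 * this PF/VF when @all is set.
 */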
static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= flow_cfg->ntuple_max_flows)
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

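/* Called when an RSS context is deleted: tear down every ntuple rule
 * that was steering packets into that context.
 */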
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d\n",
				    flow->location, err);
	}
}

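/* Delete the whole ntuple MCAM range in one AF request (start..end)
 * and drop the corresponding software state.
 */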
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->entry[flow_cfg->ntuple_offset];
	req->end   = flow_cfg->entry[flow_cfg->ntuple_offset +
				      flow_cfg->ntuple_max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

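/* Install the rx VLAN offload MCAM rule: match our DMAC plus any outer
 * VLAN tag, keep the default RX action, and request vtag0 capture so
 * the NIX can strip the tag (see otx2_enable_rxvlan() below).
 */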
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

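/* Toggle rx VLAN hardware offload, typically wired to the
 * NETIF_F_HW_VLAN_CTAG_RX feature: install or delete the rx VLAN MCAM
 * rule above, then program the NIX LF vtag config so that TYPE0 tags
 * are stripped and captured.
 */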
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough mcam entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}