// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <net/ipv6.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u16 entry;
	bool is_vf;
	u8 rss_ctx_id;
	int vf;
};

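/* Allocate MCAM entries from the AF and carve them up for VF VLAN,
 * ntuple, unicast and Rx VLAN offload filters. If the AF returns fewer
 * entries than requested, only ntuple filters are supported.
 */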
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int i;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	req->contig = false;
	req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got %d\n",
			    req->count, rsp->count);
		/* support only ntuples here */
		flow_cfg->ntuple_max_flows = rsp->count;
		flow_cfg->ntuple_offset = 0;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
	} else {
		flow_cfg->vf_vlan_offset = 0;
		flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
						vf_vlan_max_flows;
		flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
						OTX2_MAX_NTUPLE_FLOWS;
		flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
						OTX2_MAX_UNICAST_FLOWS;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
		pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
		pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
		pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
	}

	for (i = 0; i < rsp->count; i++)
		flow_cfg->entry[i] = rsp->entry_list[i];

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;

	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

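/* One-time init: allocate the flow config, initialize the flow list,
 * request MCAM entries and allocate the unicast MAC filter table.
 */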
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

	pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;

	err = otx2_alloc_mcam_entries(pf);
	if (err)
		return err;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}

/* On success adds mcam entry
 * On failure enable promiscuous mode
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* don't have free mcam entries or uc list is greater than allotted */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* unicast offset starts at 32; entries 0..31 are for ntuple filters */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->entry[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	return otx2_do_add_macfilter(pf, mac);
}

static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* check if an mcam entry exists for the given mac */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

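/* Keep the flow list sorted by location so rules are reported to
 * ethtool in ascending order.
 */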
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

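/* Copy the spec of the flow installed at @location back to ethtool */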
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= pfvf->flow_cfg->ntuple_max_flows)
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

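/* Walk all possible locations and fill @rule_locs with the locations
 * that have a flow installed; used by ethtool to enumerate rules.
 */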
int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = pfvf->flow_cfg->ntuple_max_flows;
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}

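/* Translate the IPv4 portion of an ethtool flow spec into an NPC
 * install request; only fields with a non-zero mask are matched.
 */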
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tos & ah_esp_hdr->tos))
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

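/* Convert a complete ethtool flow spec (L2, L3/L4, FLOW_EXT and
 * FLOW_MAC_EXT fields) into an NPC flow install request.
 */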
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_mask->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		if (fsp->m_ext.vlan_etype)
			return -EINVAL;
		if (fsp->m_ext.vlan_tci) {
			if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
				return -EINVAL;
			if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;

			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		/* Not Drop/Direct to queue but use action in default entry */
		if (fsp->m_ext.data[1] &&
		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
			req->op = NIX_RX_ACTION_DEFAULT;
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

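/* Build and send the flow install mailbox message for @flow. The
 * ring_cookie selects drop, an RSS context or a unicast queue, and may
 * also encode a target VF.
 */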
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if action of default entry is not
		 * requested by user
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

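/* Add or update an ntuple filter at the location given by ethtool;
 * reuses the existing otx2_flow if one is already installed there.
 */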
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	bool new = false;
	u32 ring;
	int err;

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= flow_cfg->ntuple_max_flows)
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset +
						flow->location];
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	err = otx2_add_flow_msg(pfvf, flow);
	if (err) {
		if (new)
			kfree(flow);
		return err;
	}

	/* add the newly installed flow to the list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= flow_cfg->ntuple_max_flows)
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d",
				    flow->location, err);
	}
}

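/* Delete every installed ntuple filter with one range delete request
 * and free the corresponding otx2_flow entries.
 */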
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->entry[flow_cfg->ntuple_offset];
	req->end   = flow_cfg->entry[flow_cfg->ntuple_offset +
				      flow_cfg->ntuple_max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

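/* Teardown path: delete all installed flows and return every MCAM
 * entry owned by this PF back to the AF.
 */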
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

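/* Install the MCAM rule used for Rx VLAN offload: match VLAN tagged
 * traffic destined to the interface MAC and mark it for VTAG0 handling.
 */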
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

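/* Toggle Rx VLAN hardware offload: install or delete the steering flow
 * and then configure VTAG0 stripping/capture via the nix_vtag_cfg message.
 */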
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough mcam entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}