// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <net/ipv6.h>
#include <linux/sort.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

static int otx2_mcam_entry_init(struct otx2_nic *pfvf);

struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u32 entry;
	bool is_vf;
	u8 rss_ctx_id;
#define DMAC_FILTER_RULE	BIT(0)
#define PFC_FLOWCTRL_RULE	BIT(1)
	u16 rule_type;
	int vf;
};

enum dmac_req {
	DMAC_ADDR_UPDATE,
	DMAC_ADDR_DEL
};
static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
{
	devm_kfree(pfvf->dev, flow_cfg->flow_ent);
	flow_cfg->flow_ent = NULL;
	flow_cfg->max_flows = 0;
}

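/* Free all MCAM entries reserved for ntuple filters back to the AF,
 * one mailbox request per entry, then clear the local ntuple state.
 */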
static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	int ent, err;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	for (ent = 0; ent < flow_cfg->max_flows; ent++) {
		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
		if (!req)
			break;

		req->entry = flow_cfg->flow_ent[ent];

		/* Send message to AF to free MCAM entries */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			break;
	}
	mutex_unlock(&pfvf->mbox.lock);
	otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
	return 0;
}

static int mcam_entry_cmp(const void *a, const void *b)
{
	return *(u16 *)a - *(u16 *)b;
}

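/* Allocate @count MCAM entries from the AF for ntuple filters, replacing
 * any previously allocated ones. Returns the number of entries actually
 * granted, which may be fewer than requested.
 */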
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int ent, allocated = 0;

	/* Free current ones and allocate new ones with requested count */
	otx2_free_ntuple_mcam_entries(pfvf);

	if (!count)
		return 0;

	flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
						sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->flow_ent) {
		netdev_err(pfvf->netdev,
			   "%s: Unable to allocate memory for flow entries\n",
			   __func__);
		return -ENOMEM;
	}

	mutex_lock(&pfvf->mbox.lock);

	/* Only a maximum of NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
	 * allocated in a single request.
	 */
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
		if (!req)
			goto exit;

		req->contig = false;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		/* Allocate higher priority entries for PFs, so that the VFs'
		 * entries land on top of the PF's.
		 */
		if (!is_otx2_vf(pfvf->pcifunc)) {
			req->priority = NPC_MCAM_HIGHER_PRIO;
			req->ref_entry = flow_cfg->def_ent[0];
		}

		/* Send message to AF */
		if (otx2_sync_mbox_msg(&pfvf->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp))
			goto exit;

		for (ent = 0; ent < rsp->count; ent++)
			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		/* If this request is not fulfilled, no need to send
		 * further requests.
		 */
		if (rsp->count != req->count)
			break;
	}

	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in ascending order;
	 * otherwise the user-installed ntuple filter index and the MCAM entry
	 * index will not be in sync.
	 */
	if (allocated)
		sort(&flow_cfg->flow_ent[0], allocated,
		     sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

exit:
	mutex_unlock(&pfvf->mbox.lock);

	flow_cfg->max_flows = allocated;

	if (allocated) {
		pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
	}

	if (allocated != count)
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got only %d\n",
			    count, allocated);
	return allocated;
}
EXPORT_SYMBOL(otx2_alloc_mcam_entries);

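/* One-time MCAM setup: allocate the default entries used for unicast,
 * Rx VLAN and per-VF VLAN filters, check whether the mkex profile
 * extracts NPC_DMAC before advertising VLAN offloads, then reserve a
 * pool of entries for ntuple/TC filters. Feature flags are set only
 * for what the AF actually granted.
 */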
static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_get_field_status_req *freq;
	struct npc_get_field_status_rsp *frsp;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int ent, count;

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	count = OTX2_MAX_UNICAST_FLOWS +
			OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;

	flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
					       sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->def_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->contig = false;
	req->count = count;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(rsp);
	}

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
		mutex_unlock(&pfvf->mbox.lock);
		devm_kfree(pfvf->dev, flow_cfg->def_ent);
		return 0;
	}

	for (ent = 0; ent < rsp->count; ent++)
		flow_cfg->def_ent[ent] = rsp->entry_list[ent];

	flow_cfg->vf_vlan_offset = 0;
	flow_cfg->unicast_offset = vf_vlan_max_flows;
	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
					OTX2_MAX_UNICAST_FLOWS;
	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;

	/* Check whether the NPC_DMAC field is supported
	 * by the mkex profile before setting the VLAN support flags.
	 */
	freq = otx2_mbox_alloc_msg_npc_get_field_status(&pfvf->mbox);
	if (!freq) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	freq->field = NPC_DMAC;
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &freq->hdr);
	if (IS_ERR(frsp)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(frsp);
	}

	if (frsp->enable) {
		pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
		pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
	}

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	/* Allocate entries for Ntuple filters */
	count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0) {
		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
		return 0;
	}

	pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;

	return 0;
}

/* TODO : revisit on size */
#define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32)

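/* VF variant of flow init: only the flow config structure, flow lists
 * and the DMAC filter bitmap are set up here; MCAM entries for ntuple
 * filters are allocated later on demand.
 */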
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg;

	pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
				      sizeof(struct otx2_flow_config),
				      GFP_KERNEL);
	if (!pfvf->flow_cfg)
		return -ENOMEM;

	pfvf->flow_cfg->dmacflt_bmap = devm_kcalloc(pfvf->dev,
						    BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
						    sizeof(long), GFP_KERNEL);
	if (!pfvf->flow_cfg->dmacflt_bmap)
		return -ENOMEM;

	flow_cfg = pfvf->flow_cfg;
	INIT_LIST_HEAD(&flow_cfg->flow_list);
	INIT_LIST_HEAD(&flow_cfg->flow_list_tc);
	flow_cfg->max_flows = 0;

	return 0;
}
EXPORT_SYMBOL(otx2vf_mcam_flow_init);

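/* PF flow init: allocate the flow config and DMAC filter bitmap, set up
 * the default and ntuple MCAM entries via otx2_mcam_entry_init(), and,
 * if unicast filtering is available, the MAC table and the CGX/RPM
 * DMAC-filter index mapping.
 */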
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	pf->flow_cfg->dmacflt_bmap = devm_kcalloc(pf->dev,
						  BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
						  sizeof(long), GFP_KERNEL);
	if (!pf->flow_cfg->dmacflt_bmap)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
	INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);

	/* Allocate bare minimum number of MCAM entries needed for
	 * unicast and ntuple filters.
	 */
	err = otx2_mcam_entry_init(pf);
	if (err)
		return err;

	/* Check if MCAM entries were allocated or not */
	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return 0;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	otx2_dmacflt_get_max_cnt(pf);

	/* DMAC filters are not allocated */
	if (!pf->flow_cfg->dmacflt_max_flows)
		return 0;

	pf->flow_cfg->bmap_to_dmacindex =
			devm_kzalloc(pf->dev, sizeof(u32) *
				     pf->flow_cfg->dmacflt_max_flows,
				     GFP_KERNEL);

	if (!pf->flow_cfg->bmap_to_dmacindex)
		return -ENOMEM;

	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}
EXPORT_SYMBOL(otx2_mcam_flow_del);

/* On success adds an mcam entry
 * On failure enables promiscuous mode
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* no free mcam entries, or the uc list is larger than allotted */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* unicast offset starts at 32; 0..31 are for ntuple */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->def_ent[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	if (!bitmap_empty(pf->flow_cfg->dmacflt_bmap,
			  pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(netdev,
			    "Add %pM to CGX/RPM DMAC filters list as well\n",
			    mac);

	return otx2_do_add_macfilter(pf, mac);
}

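/* Look up the MCAM entry installed for @mac and release its MAC table
 * slot. Returns false when no unicast filter exists for the address.
 */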
static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* check whether an mcam entry exists for the given mac */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

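/* Insert @flow into the flow list, which is kept sorted by ascending
 * rule location.
 */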
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

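/* Maximum number of rule locations exposed to ethtool: the ntuple pool
 * alone, extended by the DMAC filter range once the pool is full or a
 * DMAC filter is already in use.
 */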
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
	if (!flow_cfg)
		return 0;

	if (flow_cfg->nr_flows == flow_cfg->max_flows ||
	    !bitmap_empty(flow_cfg->dmacflt_bmap,
			  flow_cfg->dmacflt_max_flows))
		return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
	else
		return flow_cfg->max_flows;
}
EXPORT_SYMBOL(otx2_get_maxflows);

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}

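/* Translate an ethtool IPv4 flow spec (user IP, TCP/UDP/SCTP or AH/ESP)
 * into NPC match fields on @req. Fields whose mask is zero are left as
 * don't-cares.
 */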
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

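/* IPv6 counterpart of otx2_prepare_ipv4_flow(): map the ethtool IPv6
 * flow spec onto NPC match fields, including the next-header match used
 * for fragment filtering.
 */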
static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_usr_hdr->l4_proto == IPPROTO_FRAGMENT) {
			pkt->next_header = ipv6_usr_hdr->l4_proto;
			pmask->next_header = ipv6_usr_mask->l4_proto;
			req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

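/* Convert an ethtool flow spec into an NPC install request: fill the
 * L2/L3/L4 match fields and handle the FLOW_EXT/FLOW_MAC_EXT extensions
 * (VLAN TCI and etype, IPv4 fragment flag, destination MAC). Fails with
 * -EOPNOTSUPP when no supported field is matched.
 */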
static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
				     struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_hdr->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		u16 vlan_etype;

		if (fsp->m_ext.vlan_etype) {
			/* Partial masks not supported */
			if (be16_to_cpu(fsp->m_ext.vlan_etype) != 0xFFFF)
				return -EINVAL;

			vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);

			/* A drop rule with vlan_etype == 802.1Q
			 * and vlan_id == 0 is not supported
			 */
			if (vlan_etype == ETH_P_8021Q && !fsp->m_ext.vlan_tci &&
			    fsp->ring_cookie == RX_CLS_FLOW_DISC)
				return -EINVAL;

			/* Only ETH_P_8021Q and ETH_P_8021AD types supported */
			if (vlan_etype != ETH_P_8021Q &&
			    vlan_etype != ETH_P_8021AD)
				return -EINVAL;

			memcpy(&pkt->vlan_etype, &fsp->h_ext.vlan_etype,
			       sizeof(pkt->vlan_etype));
			memcpy(&pmask->vlan_etype, &fsp->m_ext.vlan_etype,
			       sizeof(pmask->vlan_etype));

			if (vlan_etype == ETH_P_8021Q)
				req->features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG);
			else
				req->features |= BIT_ULL(NPC_VLAN_ETYPE_STAG);
		}

		if (fsp->m_ext.vlan_tci) {
			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		if (fsp->m_ext.data[1]) {
			if (flow_type == IP_USER_FLOW) {
				if (be32_to_cpu(fsp->h_ext.data[1]) != IPV4_FLAG_MORE)
					return -EINVAL;

				pkt->ip_flag = be32_to_cpu(fsp->h_ext.data[1]);
				pmask->ip_flag = be32_to_cpu(fsp->m_ext.data[1]);
				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
			} else if (fsp->h_ext.data[1] ==
					cpu_to_be32(OTX2_DEFAULT_ACTION)) {
				/* Not drop/direct-to-queue; use the action
				 * in the default entry
				 */
				req->op = NIX_RX_ACTION_DEFAULT;
			}
		}
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
					struct ethtool_rx_flow_spec *fsp)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	u64 ring_cookie = fsp->ring_cookie;
	u32 flow_type;

	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
		return false;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);

	/* CGX/RPM block dmac filtering is configured for white listing;
	 * check for an action other than DROP
	 */
	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
		if (is_zero_ether_addr(eth_mask->h_dest) &&
		    is_valid_ether_addr(eth_hdr->h_dest))
			return true;
	}

	return false;
}

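/* Build and send the npc_install_flow mailbox request for @flow: choose
 * the drop/RSS/unicast action from the ethtool ring_cookie, validate any
 * VF target, and flag VLAN-priority rules that need PFC backpressure
 * configured on the receive queue.
 */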
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
#ifdef CONFIG_DCB
	int vlan_prio, qidx, pfc_rule = 0;
#endif
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if the user did not request the
		 * default entry's action
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
			req->flow_key_alg = pfvf->hw.flowkey_alg_idx;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}

#ifdef CONFIG_DCB
		/* Identify PFC rule if PFC enabled and ntuple rule is vlan */
		if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) &&
		    pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) {
			vlan_prio = ntohs(req->packet.vlan_tci) &
				    ntohs(req->mask.vlan_tci);

			/* Get the priority */
			vlan_prio >>= 13;
			flow->rule_type |= PFC_FLOWCTRL_RULE;
			/* Check if PFC enabled for this priority */
			if (pfvf->pfc_en & BIT(vlan_prio)) {
				pfc_rule = true;
				qidx = req->index;
			}
		}
#endif
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);

#ifdef CONFIG_DCB
	if (!err && pfc_rule)
		otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true);
#endif

	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
				    struct otx2_flow *flow)
{
	struct otx2_flow *pf_mac;
	struct ethhdr *eth_hdr;

	pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
	if (!pf_mac)
		return -ENOMEM;

	pf_mac->entry = 0;
	pf_mac->rule_type |= DMAC_FILTER_RULE;
	pf_mac->location = pfvf->flow_cfg->max_flows;
	memcpy(&pf_mac->flow_spec, &flow->flow_spec,
	       sizeof(struct ethtool_rx_flow_spec));
	pf_mac->flow_spec.location = pf_mac->location;

	/* Copy PF mac address */
	eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
	ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);

	/* Install DMAC filter with PF mac address */
	otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);

	otx2_add_flow_to_list(pfvf, pf_mac);
	pfvf->flow_cfg->nr_flows++;
	set_bit(0, pfvf->flow_cfg->dmacflt_bmap);

	return 0;
}

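/* ethtool -U/-N entry point: validate the rule location and ring cookie,
 * then install the rule either as a CGX/RPM DMAC filter (pure
 * destination-MAC match) or as a regular ntuple MCAM rule, and track it
 * in the flow list.
 */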
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	struct ethhdr *eth_hdr;
	bool new = false;
	int err = 0;
	u64 vf_num;
	u32 ring;

	if (!flow_cfg->max_flows) {
		netdev_err(pfvf->netdev,
			   "Ntuple rule count is 0, allocate and retry\n");
		return -EINVAL;
	}

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	/* The number of queues on a VF can be greater or less than
	 * the number on the PF, so there is no need to check the queue
	 * count when the PF is installing a rule for its VF. Below are
	 * the expected vf_num values based on the ethtool command.
	 *
	 * e.g.
	 * 1. ethtool -U <netdev> ... action -1  ==> vf_num:255
	 * 2. ethtool -U <netdev> ... action <queue_num>  ==> vf_num:0
	 * 3. ethtool -U <netdev> ... vf <vf_idx> queue <queue_num>  ==>
	 *    vf_num:vf_idx+1
	 */
	vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
	if (!is_otx2_vf(pfvf->pcifunc) && !vf_num &&
	    ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		flow->entry = flow_cfg->flow_ent[flow->location];
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
		eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* Sync dmac filter table with updated fields */
		if (flow->rule_type & DMAC_FILTER_RULE)
			return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
						   flow->entry);

		if (bitmap_full(flow_cfg->dmacflt_bmap,
				flow_cfg->dmacflt_max_flows)) {
			netdev_warn(pfvf->netdev,
				    "Can't insert the rule %d as max allowed dmac filters are %d\n",
				    flow->location +
				    flow_cfg->dmacflt_max_flows,
				    flow_cfg->dmacflt_max_flows);
			err = -EINVAL;
			if (new)
				kfree(flow);
			return err;
		}

		/* Install PF mac address to DMAC filter list */
		if (!test_bit(0, flow_cfg->dmacflt_bmap))
			otx2_add_flow_with_pfmac(pfvf, flow);

		flow->rule_type |= DMAC_FILTER_RULE;
		flow->entry = find_first_zero_bit(flow_cfg->dmacflt_bmap,
						  flow_cfg->dmacflt_max_flows);
		fsp->location = flow_cfg->max_flows + flow->entry;
		flow->flow_spec.location = fsp->location;
		flow->location = fsp->location;

		set_bit(flow->entry, flow_cfg->dmacflt_bmap);
		otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);

	} else {
		if (flow->location >= pfvf->flow_cfg->max_flows) {
			netdev_warn(pfvf->netdev,
				    "Can't insert non dmac ntuple rule at %d, allowed range 0-%d\n",
				    flow->location,
				    flow_cfg->max_flows - 1);
			err = -EINVAL;
		} else {
			err = otx2_add_flow_msg(pfvf, flow);
		}
	}

	if (err) {
		if (err == MBOX_MSG_INVALID)
			err = -EINVAL;
		if (new)
			kfree(flow);
		return err;
	}

	/* add the newly installed flow to the list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	if (flow->is_vf)
		netdev_info(pfvf->netdev,
			    "Make sure that VF's queue number is within its queue limit\n");
	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;
	bool found = false;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			if (req == DMAC_ADDR_DEL) {
				otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
						    0);
				clear_bit(0, pfvf->flow_cfg->dmacflt_bmap);
				found = true;
			} else {
				ether_addr_copy(eth_hdr->h_dest,
						pfvf->netdev->dev_addr);

				otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
			}
			break;
		}
	}

	if (found) {
		list_del(&iter->list);
		kfree(iter);
		pfvf->flow_cfg->nr_flows--;
	}
}

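/* ethtool delete entry point: tear down either a CGX/RPM DMAC filter
 * (restoring PF MAC handling when the last one is removed) or an MCAM
 * rule, then unlink and free the flow.
 */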
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	if (flow->rule_type & DMAC_FILTER_RULE) {
		struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* user not allowed to remove dmac filter with interface mac */
		if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
			return -EPERM;

		err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
					  flow->entry);
		clear_bit(flow->entry, flow_cfg->dmacflt_bmap);
		/* If all dmac filters are removed, delete the macfilter with
		 * the interface mac address and configure the CGX/RPM block
		 * in promiscuous mode
		 */
		if (bitmap_weight(flow_cfg->dmacflt_bmap,
				  flow_cfg->dmacflt_max_flows) == 1)
			otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
	} else {
#ifdef CONFIG_DCB
		if (flow->rule_type & PFC_FLOWCTRL_RULE)
			otx2_update_bpid_in_rqctx(pfvf, 0,
						  flow->flow_spec.ring_cookie,
						  false);
#endif

		err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	}

	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d",
				    flow->location, err);
	}
}

int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->flow_ent[0];
	req->end = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

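/* Full teardown: delete every installed flow and hand all MCAM entries,
 * default and ntuple alike, back to the AF.
 */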
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

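/* Toggle Rx VLAN offload: install or delete the VTAG0 match rule, then
 * configure VLAN strip and capture on the NIX LF accordingly.
 */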
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough mcam entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}

void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;

	list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
		if (iter->rule_type & DMAC_FILTER_RULE) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			otx2_dmacflt_add(pf, eth_hdr->h_dest,
					 iter->entry);
		}
	}
}

void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
{
	otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
}