/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/jhash.h>
#include <net/pkt_cls.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
#include "bnxt_tc.h"

#ifdef CONFIG_BNXT_SRIOV

#define CFA_HANDLE_INVALID		0xffff
#define VF_IDX_INVALID			0xffff

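/* Allocate a VF representor in the firmware CFA for @vf_idx and return the
 * tx_cfa_action and rx_cfa_code handles used to mux/demux its traffic.
 */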
static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx,
			      u16 *tx_cfa_action, u16 *rx_cfa_code)
{
	struct hwrm_cfa_vfr_alloc_output *resp;
	struct hwrm_cfa_vfr_alloc_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_ALLOC);
	if (!rc) {
		req->vf_id = cpu_to_le16(vf_idx);
		sprintf(req->vfr_name, "vfr%d", vf_idx);

		resp = hwrm_req_hold(bp, req);
		rc = hwrm_req_send(bp, req);
		if (!rc) {
			*tx_cfa_action = le16_to_cpu(resp->tx_cfa_action);
			*rx_cfa_code = le16_to_cpu(resp->rx_cfa_code);
			netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x",
				   *tx_cfa_action, *rx_cfa_code);
		}
		hwrm_req_drop(bp, req);
	}
	if (rc)
		netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
	return rc;
}

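/* Free the firmware CFA representor previously allocated for @vf_idx. */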
static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
{
	struct hwrm_cfa_vfr_free_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_FREE);
	if (!rc) {
		sprintf(req->vfr_name, "vfr%d", vf_idx);
		rc = hwrm_req_send(bp, req);
	}
	if (rc)
		netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
	return rc;
}

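/* Query the VF's configured max MTU via HWRM_FUNC_QCFG; if the firmware
 * reports zero, fall back to BNXT_MAX_MTU.
 */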
static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
			      u16 *max_mtu)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	u16 mtu;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		mtu = le16_to_cpu(resp->max_mtu_configured);
		if (!mtu)
			*max_mtu = BNXT_MAX_MTU;
		else
			*max_mtu = mtu;
	}
	hwrm_req_drop(bp, req);
	return rc;
}

static int bnxt_vf_rep_open(struct net_device *dev)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
	struct bnxt *bp = vf_rep->bp;

	/* Enable link and TX only if the parent PF is open. */
	if (netif_running(bp->dev)) {
		netif_carrier_on(dev);
		netif_tx_start_all_queues(dev);
	}
	return 0;
}

static int bnxt_vf_rep_close(struct net_device *dev)
{
	netif_carrier_off(dev);
	netif_tx_disable(dev);

	return 0;
}

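/* VF-rep transmit: attach the representor's metadata dst (carrying the
 * tx_cfa_action) and redirect the skb to the parent PF netdev, which owns
 * the actual TX rings.
 */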
static netdev_tx_t bnxt_vf_rep_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
	int rc, len = skb->len;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)vf_rep->dst);
	skb_dst_set(skb, (struct dst_entry *)vf_rep->dst);
	skb->dev = vf_rep->dst->u.port_info.lower_dev;

	rc = dev_queue_xmit(skb);
	if (!rc) {
		vf_rep->tx_stats.packets++;
		vf_rep->tx_stats.bytes += len;
	}
	return rc;
}

static void
bnxt_vf_rep_get_stats64(struct net_device *dev,
			struct rtnl_link_stats64 *stats)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);

	stats->rx_packets = vf_rep->rx_stats.packets;
	stats->rx_bytes = vf_rep->rx_stats.bytes;
	stats->tx_packets = vf_rep->tx_stats.packets;
	stats->tx_bytes = vf_rep->tx_stats.bytes;
}

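/* Flow block callback for the VF-rep: offload TC flower rules against the
 * VF's firmware function id when flower offload is enabled.
 */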
static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type,
					 void *type_data,
					 void *cb_priv)
{
	struct bnxt_vf_rep *vf_rep = cb_priv;
	struct bnxt *bp = vf_rep->bp;
	int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid;

	if (!bnxt_tc_flower_enabled(vf_rep->bp) ||
	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return bnxt_tc_setup_flower(bp, vf_fid, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(bnxt_vf_block_cb_list);

static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
				void *type_data)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &bnxt_vf_block_cb_list,
						  bnxt_vf_rep_setup_tc_block_cb,
						  vf_rep, vf_rep, true);
	default:
		return -EOPNOTSUPP;
	}
}

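/* Map an RX completion's cfa_code to the corresponding VF-rep netdev using
 * the published cfa_code_map (PF only); returns NULL if no representor is
 * registered for that code.
 */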
struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code)
{
	u16 vf_idx;

	if (cfa_code && bp->cfa_code_map && BNXT_PF(bp)) {
		vf_idx = bp->cfa_code_map[cfa_code];
		if (vf_idx != VF_IDX_INVALID)
			return bp->vf_reps[vf_idx]->dev;
	}
	return NULL;
}

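/* Called from the PF RX path to account for and deliver a packet that was
 * demuxed to this VF-rep netdev.
 */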
void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev);

	vf_rep->rx_stats.bytes += skb->len;
	vf_rep->rx_stats.packets++;

	netif_receive_skb(skb);
}

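/* Report the representor's physical port name in the "pf%dvf%d" format. */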
static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf,
					  size_t len)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
	struct pci_dev *pf_pdev = vf_rep->bp->pdev;
	int rc;

	rc = snprintf(buf, len, "pf%dvf%d", PCI_FUNC(pf_pdev->devfn),
		      vf_rep->vf_idx);
	if (rc >= len)
		return -EOPNOTSUPP;
	return 0;
}

static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}

static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev,
					  struct netdev_phys_item_id *ppid)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);

	/* as only PORT_PARENT_ID is supported currently use common code
	 * between PF and VF-rep for now.
	 */
	return bnxt_get_port_parent_id(vf_rep->bp->dev, ppid);
}

static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = {
	.get_drvinfo		= bnxt_vf_rep_get_drvinfo
};

static const struct net_device_ops bnxt_vf_rep_netdev_ops = {
	.ndo_open		= bnxt_vf_rep_open,
	.ndo_stop		= bnxt_vf_rep_close,
	.ndo_start_xmit		= bnxt_vf_rep_xmit,
	.ndo_get_stats64	= bnxt_vf_rep_get_stats64,
	.ndo_setup_tc		= bnxt_vf_rep_setup_tc,
	.ndo_get_port_parent_id	= bnxt_vf_rep_get_port_parent_id,
	.ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name
};

bool bnxt_dev_is_vf_rep(struct net_device *dev)
{
	return dev->netdev_ops == &bnxt_vf_rep_netdev_ops;
}

/* Called when the parent PF interface is closed.
 * The mode transition from SWITCHDEV to LEGACY happens under rtnl_lock(),
 * so this routine is safe to call under rtnl_lock().
 */
void bnxt_vf_reps_close(struct bnxt *bp)
{
	struct bnxt_vf_rep *vf_rep;
	u16 num_vfs, i;

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return;

	num_vfs = pci_num_vf(bp->pdev);
	for (i = 0; i < num_vfs; i++) {
		vf_rep = bp->vf_reps[i];
		if (netif_running(vf_rep->dev))
			bnxt_vf_rep_close(vf_rep->dev);
	}
}

/* Called when the parent PF interface is opened (re-opened).
 * The mode transition from SWITCHDEV to LEGACY happens under rtnl_lock(),
 * so this routine is safe to call under rtnl_lock().
 */
void bnxt_vf_reps_open(struct bnxt *bp)
{
	int i;

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return;

	for (i = 0; i < pci_num_vf(bp->pdev); i++) {
		/* Open the VF-Rep only if it is allocated in the FW */
		if (bp->vf_reps[i]->tx_cfa_action != CFA_HANDLE_INVALID)
			bnxt_vf_rep_open(bp->vf_reps[i]->dev);
	}
}

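/* Release one VF-rep's resources: drop its metadata dst and free its CFA
 * handle in the firmware, if allocated.
 */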
static void __bnxt_free_one_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep)
{
	if (!vf_rep)
		return;

	if (vf_rep->dst) {
		dst_release((struct dst_entry *)vf_rep->dst);
		vf_rep->dst = NULL;
	}
	if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID) {
		hwrm_cfa_vfr_free(bp, vf_rep->vf_idx);
		vf_rep->tx_cfa_action = CFA_HANDLE_INVALID;
	}
}

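/* Tear down all VF-reps: free their firmware/dst state, unregister and free
 * their netdevs, and release the vf_reps array.
 */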
static void __bnxt_vf_reps_destroy(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);
	struct bnxt_vf_rep *vf_rep;
	int i;

	for (i = 0; i < num_vfs; i++) {
		vf_rep = bp->vf_reps[i];
		if (vf_rep) {
			__bnxt_free_one_vf_rep(bp, vf_rep);
			if (vf_rep->dev) {
				/* if register_netdev failed, then netdev_ops
				 * would have been set to NULL
				 */
				if (vf_rep->dev->netdev_ops)
					unregister_netdev(vf_rep->dev);
				free_netdev(vf_rep->dev);
			}
		}
	}

	kfree(bp->vf_reps);
	bp->vf_reps = NULL;
}

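/* Destroy all VF-reps when leaving switchdev mode: quiesce the PF and the
 * representors, un-publish the cfa_code map, then unregister and free the
 * representor netdevs outside of rtnl_lock.
 */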
void bnxt_vf_reps_destroy(struct bnxt *bp)
{
	bool closed = false;

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return;

	if (!bp->vf_reps)
		return;

	/* Ensure that parent PF's and VF-reps' RX/TX has been quiesced
	 * before proceeding with VF-rep cleanup.
	 */
	rtnl_lock();
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		closed = true;
	}
	/* un-publish cfa_code_map so that RX path can't see it anymore */
	kfree(bp->cfa_code_map);
	bp->cfa_code_map = NULL;

	if (closed) {
		/* Temporarily set legacy mode to avoid re-opening
		 * representors and restore switchdev mode after that.
		 */
		bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
		bnxt_open_nic(bp, false, false);
		bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
	}
	rtnl_unlock();

	/* Need to call vf_reps_destroy() outside of rtnl_lock
	 * as unregister_netdev takes rtnl_lock
	 */
	__bnxt_vf_reps_destroy(bp);
}

/* Free the VF-Reps in firmware, during firmware hot-reset processing.
 * Note that the VF-Rep netdevs are still active (not unregistered) during
 * this process. As the mode transition from SWITCHDEV to LEGACY happens
 * under the rtnl_lock() this routine is safe under the rtnl_lock().
 */
void bnxt_vf_reps_free(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);
	int i;

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return;

	for (i = 0; i < num_vfs; i++)
		__bnxt_free_one_vf_rep(bp, bp->vf_reps[i]);
}

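/* Allocate the firmware CFA handles and the metadata dst for one VF-rep and
 * record its rx_cfa_code -> vf_idx mapping in @cfa_code_map.
 */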
static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
			     u16 *cfa_code_map)
{
	/* get cfa handles from FW */
	if (hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx, &vf_rep->tx_cfa_action,
			       &vf_rep->rx_cfa_code))
		return -ENOLINK;

	cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx;
	vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
	if (!vf_rep->dst)
		return -ENOMEM;

	/* only cfa_action is needed to mux a packet while TXing */
	vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action;
	vf_rep->dst->u.port_info.lower_dev = bp->dev;

	return 0;
}

/* Allocate the VF-Reps in firmware, during firmware hot-reset processing.
 * Note that the VF-Rep netdevs are still active (not unregistered) during
 * this process. As the mode transition from SWITCHDEV to LEGACY happens
 * under the rtnl_lock() this routine is safe under the rtnl_lock().
 */
int bnxt_vf_reps_alloc(struct bnxt *bp)
{
	u16 *cfa_code_map = bp->cfa_code_map, num_vfs = pci_num_vf(bp->pdev);
	struct bnxt_vf_rep *vf_rep;
	int rc, i;

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return 0;

	if (!cfa_code_map)
		return -EINVAL;

	for (i = 0; i < MAX_CFA_CODE; i++)
		cfa_code_map[i] = VF_IDX_INVALID;

	for (i = 0; i < num_vfs; i++) {
		vf_rep = bp->vf_reps[i];
		vf_rep->vf_idx = i;

		rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map);
		if (rc)
			goto err;
	}

	return 0;

err:
	netdev_info(bp->dev, "%s error=%d\n", __func__, rc);
	bnxt_vf_reps_free(bp);
	return rc;
}

/* Use the OUI of the PF's perm addr and report the same mac addr
 * for the same VF-rep each time
 */
static void bnxt_vf_rep_eth_addr_gen(u8 *src_mac, u16 vf_idx, u8 *mac)
{
	u32 addr;

	ether_addr_copy(mac, src_mac);

	addr = jhash(src_mac, ETH_ALEN, 0) + vf_idx;
	mac[3] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[5] = (u8)((addr >> 16) & 0xFF);
}

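/* Initialize a VF-rep netdev: inherit the parent PF's feature flags, derive
 * a stable MAC address and set the MTU limits from the VF's configuration.
 */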
static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
				    struct net_device *dev)
{
	struct net_device *pf_dev = bp->dev;
	u16 max_mtu;

	SET_NETDEV_DEV(dev, &bp->pdev->dev);
	dev->netdev_ops = &bnxt_vf_rep_netdev_ops;
	dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops;
	/* Just inherit all the features of the parent PF as the VF-Rep
	 * uses the RX/TX rings of the parent PF
	 */
	dev->hw_features = pf_dev->hw_features;
	dev->gso_partial_features = pf_dev->gso_partial_features;
	dev->vlan_features = pf_dev->vlan_features;
	dev->hw_enc_features = pf_dev->hw_enc_features;
	dev->features |= pf_dev->features;
	bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx,
				 dev->perm_addr);
	eth_hw_addr_set(dev, dev->perm_addr);
	/* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */
	if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu))
		dev->max_mtu = max_mtu;
	dev->min_mtu = ETH_ZLEN;
}

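/* Create and register one representor netdev per VF and publish the
 * cfa_code to vf_idx map once all representors are initialized.
 */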
int bnxt_vf_reps_create(struct bnxt *bp)
{
	u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
	struct bnxt_vf_rep *vf_rep;
	struct net_device *dev;
	int rc, i;

	if (!(bp->flags & BNXT_FLAG_DSN_VALID))
		return -ENODEV;

	bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL);
	if (!bp->vf_reps)
		return -ENOMEM;

	/* storage for cfa_code to vf-idx mapping */
	cfa_code_map = kmalloc_array(MAX_CFA_CODE, sizeof(*bp->cfa_code_map),
				     GFP_KERNEL);
	if (!cfa_code_map) {
		rc = -ENOMEM;
		goto err;
	}
	for (i = 0; i < MAX_CFA_CODE; i++)
		cfa_code_map[i] = VF_IDX_INVALID;

	for (i = 0; i < num_vfs; i++) {
		dev = alloc_etherdev(sizeof(*vf_rep));
		if (!dev) {
			rc = -ENOMEM;
			goto err;
		}

		vf_rep = netdev_priv(dev);
		bp->vf_reps[i] = vf_rep;
		vf_rep->dev = dev;
		vf_rep->bp = bp;
		vf_rep->vf_idx = i;
		vf_rep->tx_cfa_action = CFA_HANDLE_INVALID;

		rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map);
		if (rc)
			goto err;

		bnxt_vf_rep_netdev_init(bp, vf_rep, dev);
		rc = register_netdev(dev);
		if (rc) {
			/* no need for unregister_netdev in cleanup */
			dev->netdev_ops = NULL;
			goto err;
		}
	}

	/* publish cfa_code_map only after all VF-reps have been initialized */
	bp->cfa_code_map = cfa_code_map;
	netif_keep_dst(bp->dev);
	return 0;

err:
	netdev_info(bp->dev, "%s error=%d\n", __func__, rc);
	kfree(cfa_code_map);
	__bnxt_vf_reps_destroy(bp);
	return rc;
}

/* Devlink related routines */
int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(devlink);

	*mode = bp->eswitch_mode;
	return 0;
}

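/* devlink eswitch mode set handler: switching to switchdev creates the
 * VF-reps (when VFs already exist) and switching to legacy destroys them,
 * e.g. in response to a command such as
 * "devlink dev eswitch set pci/<bdf> mode switchdev".
 */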
int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
	int ret = 0;

	if (bp->eswitch_mode == mode) {
		netdev_info(bp->dev, "already in %s eswitch mode\n",
			    mode == DEVLINK_ESWITCH_MODE_LEGACY ?
			    "legacy" : "switchdev");
		return -EINVAL;
	}

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		bnxt_vf_reps_destroy(bp);
		break;

	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		if (bp->hwrm_spec_code < 0x10803) {
			netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n");
			return -ENOTSUPP;
		}

		/* Create representors for existing VFs */
		if (pci_num_vf(bp->pdev) > 0)
			ret = bnxt_vf_reps_create(bp);
		break;

	default:
		return -EINVAL;
	}

	if (!ret)
		bp->eswitch_mode = mode;

	return ret;
}

#endif