// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/lockdep.h>
#include <net/dst_metadata.h>

#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net.h"
#include "nfp_net_ctrl.h"
#include "nfp_net_repr.h"
#include "nfp_net_sriov.h"
#include "nfp_port.h"

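/* Look up the representor netdev at index @id in @set.  The caller must
 * hold the app (PF) lock, which is what nfp_app_is_locked() asserts.
 */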
struct net_device *
nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id)
{
	return rcu_dereference_protected(set->reprs[id],
					 nfp_app_is_locked(app));
}

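/* Software per-cpu counters for traffic passing through the representor
 * netdev itself.  On the TX path anything other than NET_XMIT_SUCCESS or
 * NET_XMIT_CN is accounted as a drop.
 */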
static void
nfp_repr_inc_tx_stats(struct net_device *netdev, unsigned int len,
		      int tx_status)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_repr_pcpu_stats *stats;

	if (unlikely(tx_status != NET_XMIT_SUCCESS &&
		     tx_status != NET_XMIT_CN)) {
		this_cpu_inc(repr->stats->tx_drops);
		return;
	}

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_repr_pcpu_stats *stats;

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

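/* Physical port representors report the MAC counters read directly from
 * the port's memory-mapped statistics area (port->eth_stats).
 */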
static void
nfp_repr_phy_port_get_stats64(struct nfp_port *port,
			      struct rtnl_link_stats64 *stats)
{
	u8 __iomem *mem = port->eth_stats;

	stats->tx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
	stats->tx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
	stats->tx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);

	stats->rx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
	stats->rx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
	stats->rx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
}

static void
nfp_repr_vnic_get_stats64(struct nfp_port *port,
			  struct rtnl_link_stats64 *stats)
{
	/* TX and RX stats are flipped as we are returning the stats as seen
	 * at the switch port corresponding to the VF.
	 */
	stats->tx_packets = readq(port->vnic + NFP_NET_CFG_STATS_RX_FRAMES);
	stats->tx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_RX_OCTETS);
	stats->tx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_RX_DISCARDS);

	stats->rx_packets = readq(port->vnic + NFP_NET_CFG_STATS_TX_FRAMES);
	stats->rx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_TX_OCTETS);
	stats->rx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_TX_DISCARDS);
}

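/* .ndo_get_stats64 - pick the hardware counter source based on the type
 * of port this representor is bound to.
 */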
static void
nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (WARN_ON(!repr->port))
		return;

	switch (repr->port->type) {
	case NFP_PORT_PHYS_PORT:
		if (!__nfp_port_get_eth_port(repr->port))
			break;
		nfp_repr_phy_port_get_stats64(repr->port, stats);
		break;
	case NFP_PORT_PF_PORT:
	case NFP_PORT_VF_PORT:
		nfp_repr_vnic_get_stats64(repr->port, stats);
		break;
	default:
		break;
	}
}

static bool
nfp_repr_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

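/* Sum the per-cpu software counters for the IFLA_OFFLOAD_XSTATS_CPU_HIT
 * report, i.e. traffic that was handled by the host rather than offloaded.
 * The u64_stats seqcount retry loop gives a consistent snapshot of each
 * CPU's counters.
 */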
static int
nfp_repr_get_host_stats64(const struct net_device *netdev,
			  struct rtnl_link_stats64 *stats)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int i;

	for_each_possible_cpu(i) {
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		struct nfp_repr_pcpu_stats *repr_stats;
		unsigned int start;

		repr_stats = per_cpu_ptr(repr->stats, i);
		do {
			start = u64_stats_fetch_begin(&repr_stats->syncp);
			tbytes = repr_stats->tx_bytes;
			tpkts = repr_stats->tx_packets;
			tdrops = repr_stats->tx_drops;
			rbytes = repr_stats->rx_bytes;
			rpkts = repr_stats->rx_packets;
		} while (u64_stats_fetch_retry(&repr_stats->syncp, start));

		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}

	return 0;
}

static int
nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
			   void *stats)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return nfp_repr_get_host_stats64(dev, stats);
	}

	return -EINVAL;
}

static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	err = nfp_app_check_mtu(repr->app, netdev, new_mtu);
	if (err)
		return err;

	err = nfp_app_repr_change_mtu(repr->app, netdev, new_mtu);
	if (err)
		return err;

	netdev->mtu = new_mtu;

	return 0;
}

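/* Representor transmit: attach the pre-allocated HW port-mux metadata dst
 * (carrying the firmware port id set at init time) and requeue the skb on
 * the lower PF netdev, so it can be sent out the corresponding switch port.
 */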
static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	unsigned int len = skb->len;
	int ret;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->dev = repr->dst->u.port_info.lower_dev;

	ret = dev_queue_xmit(skb);
	nfp_repr_inc_tx_stats(netdev, len, ret);

	return NETDEV_TX_OK;
}

static int nfp_repr_stop(struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	err = nfp_app_repr_stop(repr->app, repr);
	if (err)
		return err;

	nfp_port_configure(netdev, false);
	return 0;
}

static int nfp_repr_open(struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	err = nfp_port_configure(netdev, true);
	if (err)
		return err;

	err = nfp_app_repr_open(repr->app, repr);
	if (err)
		goto err_port_disable;

	return 0;

err_port_disable:
	nfp_port_configure(netdev, false);
	return err;
}

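/* Constrain the representor's features to what the lower PF netdev can
 * offer, while always keeping the software features, HW TC offload and
 * LLTX.
 */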
static netdev_features_t
nfp_repr_fix_features(struct net_device *netdev, netdev_features_t features)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	netdev_features_t old_features = features;
	netdev_features_t lower_features;
	struct net_device *lower_dev;

	lower_dev = repr->dst->u.port_info.lower_dev;

	lower_features = lower_dev->features;
	if (lower_features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		lower_features |= NETIF_F_HW_CSUM;

	features = netdev_intersect_features(features, lower_features);
	features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_HW_TC);
	features |= NETIF_F_LLTX;

	return features;
}

const struct net_device_ops nfp_repr_netdev_ops = {
	.ndo_init		= nfp_app_ndo_init,
	.ndo_uninit		= nfp_app_ndo_uninit,
	.ndo_open		= nfp_repr_open,
	.ndo_stop		= nfp_repr_stop,
	.ndo_start_xmit		= nfp_repr_xmit,
	.ndo_change_mtu		= nfp_repr_change_mtu,
	.ndo_get_stats64	= nfp_repr_get_stats64,
	.ndo_has_offload_stats	= nfp_repr_has_offload_stats,
	.ndo_get_offload_stats	= nfp_repr_get_offload_stats,
	.ndo_get_phys_port_name	= nfp_port_get_phys_port_name,
	.ndo_setup_tc		= nfp_port_setup_tc,
	.ndo_set_vf_mac		= nfp_app_set_vf_mac,
	.ndo_set_vf_vlan	= nfp_app_set_vf_vlan,
	.ndo_set_vf_spoofchk	= nfp_app_set_vf_spoofchk,
	.ndo_set_vf_trust	= nfp_app_set_vf_trust,
	.ndo_get_vf_config	= nfp_app_get_vf_config,
	.ndo_set_vf_link_state	= nfp_app_set_vf_link_state,
	.ndo_fix_features	= nfp_repr_fix_features,
	.ndo_set_features	= nfp_port_set_features,
	.ndo_set_mac_address    = eth_mac_addr,
	.ndo_get_port_parent_id	= nfp_port_get_port_parent_id,
};

void
nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (repr->dst->u.port_info.lower_dev != lower)
		return;

	netif_inherit_tso_max(netdev, lower);

	netdev_update_features(netdev);
}

static void nfp_repr_clean(struct nfp_repr *repr)
{
	unregister_netdev(repr->netdev);
	nfp_app_repr_clean(repr->app, repr->netdev);
	dst_release((struct dst_entry *)repr->dst);
	nfp_port_free(repr->port);
}

static struct lock_class_key nfp_repr_netdev_xmit_lock_key;

static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
					   struct netdev_queue *txq,
					   void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
}

static void nfp_repr_set_lockdep_class(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
}

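/* Finish setting up a representor netdev: attach its port, allocate the
 * HW port-mux metadata dst pointing at the PF netdev, advertise features
 * based on the vNIC's repr_cap TLV, and register the netdev.
 */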
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
		  u32 cmsg_port_id, struct nfp_port *port,
		  struct net_device *pf_netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_net *nn = netdev_priv(pf_netdev);
	u32 repr_cap = nn->tlv_caps.repr_cap;
	int err;

	nfp_repr_set_lockdep_class(netdev);

	repr->port = port;
	repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
	if (!repr->dst)
		return -ENOMEM;
	repr->dst->u.port_info.port_id = cmsg_port_id;
	repr->dst->u.port_info.lower_dev = pf_netdev;

	netdev->netdev_ops = &nfp_repr_netdev_ops;
	netdev->ethtool_ops = &nfp_port_ethtool_ops;

	netdev->max_mtu = pf_netdev->max_mtu;

	/* Set features the lower device can support with representors */
	if (repr_cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
		netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	netdev->hw_features = NETIF_F_HIGHDMA;
	if (repr_cap & NFP_NET_CFG_CTRL_RXCSUM_ANY)
		netdev->hw_features |= NETIF_F_RXCSUM;
	if (repr_cap & NFP_NET_CFG_CTRL_TXCSUM)
		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (repr_cap & NFP_NET_CFG_CTRL_GATHER)
		netdev->hw_features |= NETIF_F_SG;
	if ((repr_cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
	    repr_cap & NFP_NET_CFG_CTRL_LSO2)
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	if (repr_cap & NFP_NET_CFG_CTRL_RSS_ANY)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (repr_cap & NFP_NET_CFG_CTRL_VXLAN) {
		if (repr_cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	if (repr_cap & NFP_NET_CFG_CTRL_NVGRE) {
		if (repr_cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_GRE;
	}
	if (repr_cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
		netdev->hw_enc_features = netdev->hw_features;

	netdev->vlan_features = netdev->hw_features;

	if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN_ANY)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN_ANY) {
		if (repr_cap & NFP_NET_CFG_CTRL_LSO2)
			netdev_warn(netdev, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
		else
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	}
	if (repr_cap & NFP_NET_CFG_CTRL_CTAG_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (repr_cap & NFP_NET_CFG_CTRL_RXQINQ)
		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;

	netdev->features = netdev->hw_features;

	/* C-Tag strip and S-Tag strip can't be supported simultaneously,
	 * so enable C-Tag strip and disable S-Tag strip by default.
	 */
	netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
	netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);

	netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
	netdev->features |= NETIF_F_LLTX;

	if (nfp_app_has_tc(app)) {
		netdev->features |= NETIF_F_HW_TC;
		netdev->hw_features |= NETIF_F_HW_TC;
	}

	err = nfp_app_repr_init(app, netdev);
	if (err)
		goto err_clean;

	err = register_netdev(netdev);
	if (err)
		goto err_repr_clean;

	return 0;

err_repr_clean:
	nfp_app_repr_clean(app, netdev);
err_clean:
	dst_release((struct dst_entry *)repr->dst);
	return err;
}

static void __nfp_repr_free(struct nfp_repr *repr)
{
	free_percpu(repr->stats);
	free_netdev(repr->netdev);
}

void nfp_repr_free(struct net_device *netdev)
{
	__nfp_repr_free(netdev_priv(netdev));
}

struct net_device *
nfp_repr_alloc_mqs(struct nfp_app *app, unsigned int txqs, unsigned int rxqs)
{
	struct net_device *netdev;
	struct nfp_repr *repr;

	netdev = alloc_etherdev_mqs(sizeof(*repr), txqs, rxqs);
	if (!netdev)
		return NULL;

	netif_carrier_off(netdev);

	repr = netdev_priv(netdev);
	repr->netdev = netdev;
	repr->app = app;

	repr->stats = netdev_alloc_pcpu_stats(struct nfp_repr_pcpu_stats);
	if (!repr->stats)
		goto err_free_netdev;

	return netdev;

err_free_netdev:
	free_netdev(netdev);
	return NULL;
}

void nfp_repr_clean_and_free(struct nfp_repr *repr)
{
	nfp_info(repr->app->cpp, "Destroying Representor(%s)\n",
		 repr->netdev->name);
	nfp_repr_clean(repr);
	__nfp_repr_free(repr);
}

void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs)
{
	struct net_device *netdev;
	unsigned int i;

	for (i = 0; i < reprs->num_reprs; i++) {
		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev)
			nfp_repr_clean_and_free(netdev_priv(netdev));
	}

	kfree(reprs);
}

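/* Destroy all representors of the given type: preclean each one while it
 * is still published, clear the app's reprs pointer, wait out an RCU grace
 * period, then unregister and free them.
 */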
void
nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type)
{
	struct net_device *netdev;
	struct nfp_reprs *reprs;
	int i;

	reprs = rcu_dereference_protected(app->reprs[type],
					  nfp_app_is_locked(app));
	if (!reprs)
		return;

	/* Preclean must happen before we remove the reprs reference from the
	 * app below.
	 */
	for (i = 0; i < reprs->num_reprs; i++) {
		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev)
			nfp_app_repr_preclean(app, netdev);
	}

	reprs = nfp_app_reprs_set(app, type, NULL);

	synchronize_rcu();
	nfp_reprs_clean_and_free(app, reprs);
}

struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
{
	struct nfp_reprs *reprs;

	reprs = kzalloc(struct_size(reprs, reprs, num_reprs), GFP_KERNEL);
	if (!reprs)
		return NULL;
	reprs->num_reprs = num_reprs;

	return reprs;
}

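/* Tear down physical port representors whose underlying port has been
 * marked NFP_PORT_INVALID: each such repr is precleaned, unpublished from
 * the reprs array under RTNL, and cleaned once an RCU grace period has
 * elapsed.
 */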
int nfp_reprs_resync_phys_ports(struct nfp_app *app)
{
	struct net_device *netdev;
	struct nfp_reprs *reprs;
	struct nfp_repr *repr;
	int i;

	reprs = nfp_reprs_get_locked(app, NFP_REPR_TYPE_PHYS_PORT);
	if (!reprs)
		return 0;

	for (i = 0; i < reprs->num_reprs; i++) {
		netdev = nfp_repr_get_locked(app, reprs, i);
		if (!netdev)
			continue;

		repr = netdev_priv(netdev);
		if (repr->port->type != NFP_PORT_INVALID)
			continue;

		nfp_app_repr_preclean(app, netdev);
		rtnl_lock();
		rcu_assign_pointer(reprs->reprs[i], NULL);
		rtnl_unlock();
		synchronize_rcu();
		nfp_repr_clean(repr);
	}

	return 0;
}