// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 * Copyright 2020-2022 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "ef100_rep.h"
#include "ef100_netdev.h"
#include "ef100_nic.h"
#include "mae.h"
#include "rx_common.h"

#define EFX_EF100_REP_DRIVER	"efx_ef100_rep"

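/* Default size of the software "pseudo ring": the maximum number of SKBs
 * that may sit on a representor's rx_list awaiting its NAPI poller.  This
 * is a pure software limit (see efx_ef100_rep_rx_packet()), not a hardware
 * ring size, and it can be changed at runtime via ethtool -G.
 */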
#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE	64

static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);

static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
				     unsigned int i)
{
	efv->parent = efx;
	efv->idx = i;
	INIT_LIST_HEAD(&efv->list);
	efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
	INIT_LIST_HEAD(&efv->dflt.acts.list);
	INIT_LIST_HEAD(&efv->rx_list);
	spin_lock_init(&efv->rx_lock);
	efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			  NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			  NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			  NETIF_MSG_TX_ERR | NETIF_MSG_HW;
	return 0;
}

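/* Representors have no hardware queues of their own; ndo_open/ndo_stop just
 * set up and tear down the NAPI context used to deliver SKBs queued on the
 * software rx_list by efx_ef100_rep_rx_packet().
 */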
static int efx_ef100_rep_open(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&efv->napi);
	return 0;
}

static int efx_ef100_rep_close(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	napi_disable(&efv->napi);
	netif_napi_del(&efv->napi);
	return 0;
}

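/* TX from a representor is handed to the parent PF's datapath via the shared
 * __ef100_hard_start_xmit(); the representor itself is LLTX, so we take the
 * parent net_dev's TX lock around that call.
 */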
static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	netdev_tx_t rc;

	/* __ef100_hard_start_xmit() will always return success even in the
	 * case of TX drops, where it will increment efx's tx_dropped.  The
	 * efv stats really only count attempted TX, not success/failure.
	 */
	atomic64_inc(&efv->stats.tx_packets);
	atomic64_add(skb->len, &efv->stats.tx_bytes);
	netif_tx_lock(efx->net_dev);
	rc = __ef100_hard_start_xmit(skb, efx, dev, efv);
	netif_tx_unlock(efx->net_dev);
	return rc;
}

static int efx_ef100_rep_get_port_parent_id(struct net_device *dev,
					    struct netdev_phys_item_id *ppid)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	struct ef100_nic_data *nic_data;

	nic_data = efx->nic_data;
	/* nic_data->port_id is a u8[] */
	ppid->id_len = sizeof(nic_data->port_id);
	memcpy(ppid->id, nic_data->port_id, sizeof(nic_data->port_id));
	return 0;
}

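/* Physical port names take the form "p<port>pf<pf_index>vf<vf_index>",
 * e.g. "p0pf0vf1" for the representor of VF 1 on PF 0 of port 0.
 */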
static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
					    char *buf, size_t len)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	struct ef100_nic_data *nic_data;
	int ret;

	nic_data = efx->nic_data;
	ret = snprintf(buf, len, "p%upf%uvf%u", efx->port_num,
		       nic_data->pf_index, efv->idx);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

static void efx_ef100_rep_get_stats64(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	struct efx_rep *efv = netdev_priv(dev);

	stats->rx_packets = atomic64_read(&efv->stats.rx_packets);
	stats->tx_packets = atomic64_read(&efv->stats.tx_packets);
	stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes);
	stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes);
	stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped);
	stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
}

static const struct net_device_ops efx_ef100_rep_netdev_ops = {
	.ndo_open		= efx_ef100_rep_open,
	.ndo_stop		= efx_ef100_rep_close,
	.ndo_start_xmit		= efx_ef100_rep_xmit,
	.ndo_get_port_parent_id	= efx_ef100_rep_get_port_parent_id,
	.ndo_get_phys_port_name	= efx_ef100_rep_get_phys_port_name,
	.ndo_get_stats64	= efx_ef100_rep_get_stats64,
};

static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, EFX_EF100_REP_DRIVER, sizeof(drvinfo->driver));
}

static u32 efx_ef100_rep_ethtool_get_msglevel(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	return efv->msg_enable;
}

static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
					       u32 msg_enable)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	efv->msg_enable = msg_enable;
}

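/* The "RX ring" reported and set here is the software pseudo ring: it only
 * bounds how many SKBs may be queued on the representor's rx_list before
 * efx_ef100_rep_rx_packet() starts dropping.  There is no TX ring and no
 * mini/jumbo rings, hence those parameters are rejected in set_ringparam.
 */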
static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev,
						struct ethtool_ringparam *ring,
						struct kernel_ethtool_ringparam *kring,
						struct netlink_ext_ack *ext_ack)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	ring->rx_max_pending = U32_MAX;
	ring->rx_pending = efv->rx_pring_size;
}

static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev,
					       struct ethtool_ringparam *ring,
					       struct kernel_ethtool_ringparam *kring,
					       struct netlink_ext_ack *ext_ack)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending)
		return -EINVAL;

	efv->rx_pring_size = ring->rx_pending;
	return 0;
}

static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
	.get_drvinfo		= efx_ef100_rep_get_drvinfo,
	.get_msglevel		= efx_ef100_rep_ethtool_get_msglevel,
	.set_msglevel		= efx_ef100_rep_ethtool_set_msglevel,
	.get_ringparam		= efx_ef100_rep_ethtool_get_ringparam,
	.set_ringparam		= efx_ef100_rep_ethtool_set_ringparam,
};

static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
						   unsigned int i)
{
	struct net_device *net_dev;
	struct efx_rep *efv;
	int rc;

	net_dev = alloc_etherdev_mq(sizeof(*efv), 1);
	if (!net_dev)
		return ERR_PTR(-ENOMEM);

	efv = netdev_priv(net_dev);
	rc = efx_ef100_rep_init_struct(efx, efv, i);
	if (rc)
		goto fail1;
	efv->net_dev = net_dev;
	rtnl_lock();
	spin_lock_bh(&efx->vf_reps_lock);
	list_add_tail(&efv->list, &efx->vf_reps);
	spin_unlock_bh(&efx->vf_reps_lock);
	if (netif_running(efx->net_dev) && efx->state == STATE_NET_UP) {
		netif_device_attach(net_dev);
		netif_carrier_on(net_dev);
	} else {
		netif_carrier_off(net_dev);
		netif_tx_stop_all_queues(net_dev);
	}
	rtnl_unlock();

	net_dev->netdev_ops = &efx_ef100_rep_netdev_ops;
	net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops;
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;
	net_dev->features |= NETIF_F_LLTX;
	net_dev->hw_features |= NETIF_F_LLTX;
	return efv;
fail1:
	free_netdev(net_dev);
	return ERR_PTR(rc);
}

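/* Resolve the VF's m-port and install the representor's default MAE rule.
 * The m-port ID is looked up from an m-port selector constructed for the
 * VF index; it is later matched against incoming packets by
 * efx_ef100_find_rep_by_mport(), which is why it must fit in 16 bits.
 */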
static int efx_ef100_configure_rep(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;
	u32 selector;
	int rc;

	efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
	/* Construct mport selector for corresponding VF */
	efx_mae_mport_vf(efx, efv->idx, &selector);
	/* Look up actual mport ID */
	rc = efx_mae_lookup_mport(efx, selector, &efv->mport);
	if (rc)
		return rc;
	pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport);
	/* mport label should fit in 16 bits */
	WARN_ON(efv->mport >> 16);

	return efx_tc_configure_default_rule_rep(efv);
}

static void efx_ef100_deconfigure_rep(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;

	efx_tc_deconfigure_default_rule(efx, &efv->dflt);
}

static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;

	rtnl_lock();
	spin_lock_bh(&efx->vf_reps_lock);
	list_del(&efv->list);
	spin_unlock_bh(&efx->vf_reps_lock);
	rtnl_unlock();
	synchronize_rcu();
	free_netdev(efv->net_dev);
}

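/* Create, configure and register a representor netdevice for VF index i.
 * On any failure the partially-constructed representor is torn down again
 * and an error is returned to the caller.
 */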
int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
{
	struct efx_rep *efv;
	int rc;

	efv = efx_ef100_rep_create_netdev(efx, i);
	if (IS_ERR(efv)) {
		rc = PTR_ERR(efv);
		pci_err(efx->pci_dev,
			"Failed to create representor for VF %d, rc %d\n", i,
			rc);
		return rc;
	}
	rc = efx_ef100_configure_rep(efv);
	if (rc) {
		pci_err(efx->pci_dev,
			"Failed to configure representor for VF %d, rc %d\n",
			i, rc);
		goto fail1;
	}
	rc = register_netdev(efv->net_dev);
	if (rc) {
		pci_err(efx->pci_dev,
			"Failed to register representor for VF %d, rc %d\n",
			i, rc);
		goto fail2;
	}
	pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
		efv->net_dev->name);
	return 0;
fail2:
	efx_ef100_deconfigure_rep(efv);
fail1:
	efx_ef100_rep_destroy_netdev(efv);
	return rc;
}

void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
{
	struct net_device *rep_dev;

	rep_dev = efv->net_dev;
	if (!rep_dev)
		return;
	netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
	unregister_netdev(rep_dev);
	efx_ef100_deconfigure_rep(efv);
	efx_ef100_rep_destroy_netdev(efv);
}

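/* Tear down any remaining VF representors.  Representors are only created
 * when the interface has MAE access (nic_data->grp_mae), so there is
 * nothing to do without it.
 */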
void efx_ef100_fini_vfreps(struct efx_nic *efx)
{
	struct ef100_nic_data *nic_data = efx->nic_data;
	struct efx_rep *efv, *next;

	if (!nic_data->grp_mae)
		return;

	list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
		efx_ef100_vfrep_destroy(efx, efv);
}

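/* NAPI poll for the software pseudo ring.  efx_ef100_rep_rx_packet() bumps
 * write_index for every SKB it queues and only schedules NAPI when the list
 * was previously empty ("primed"); storing read_index back once we have
 * caught up is what re-arms that fake interrupt.
 */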
static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
{
	struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
	unsigned int read_index;
	struct list_head head;
	struct sk_buff *skb;
	bool need_resched;
	int spent = 0;

	INIT_LIST_HEAD(&head);
	/* Grab up to 'weight' pending SKBs */
	spin_lock_bh(&efv->rx_lock);
	read_index = efv->write_index;
	while (spent < weight && !list_empty(&efv->rx_list)) {
		skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
		list_del(&skb->list);
		list_add_tail(&skb->list, &head);
		spent++;
	}
	spin_unlock_bh(&efv->rx_lock);
	/* Receive them */
	netif_receive_skb_list(&head);
	if (spent < weight)
		if (napi_complete_done(napi, spent)) {
			spin_lock_bh(&efv->rx_lock);
			efv->read_index = read_index;
			/* If write_index advanced while we were doing the
			 * RX, then storing our read_index won't re-prime the
			 * fake-interrupt.  In that case, we need to schedule
			 * NAPI again to consume the additional packet(s).
			 */
			need_resched = efv->write_index != read_index;
			spin_unlock_bh(&efv->rx_lock);
			if (need_resched)
				napi_schedule(&efv->napi);
		}
	return spent;
}

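/* Queue a received packet for delivery on the representor.  The payload is
 * copied out of the parent's RX buffer into a freshly allocated SKB, added
 * to rx_list, and the representor's NAPI poller is kicked if the list was
 * previously empty.  Packets are dropped (and counted in rx_dropped) if the
 * pseudo ring is full or the SKB allocation fails.
 */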
void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
{
	u8 *eh = efx_rx_buf_va(rx_buf);
	struct sk_buff *skb;
	bool primed;

	/* Don't allow too many queued SKBs to build up, as they consume
	 * GFP_ATOMIC memory.  If we overrun, just start dropping.
	 */
	if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "nodesc-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}

	skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
	if (!skb) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "noskb-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}
	memcpy(skb->data, eh, rx_buf->len);
	__skb_put(skb, rx_buf->len);

	skb_record_rx_queue(skb, 0); /* rep is single-queue */

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efv->net_dev);

	skb_checksum_none_assert(skb);

	atomic64_inc(&efv->stats.rx_packets);
	atomic64_add(rx_buf->len, &efv->stats.rx_bytes);

	/* Add it to the rx list */
	spin_lock_bh(&efv->rx_lock);
	primed = efv->read_index == efv->write_index;
	list_add_tail(&skb->list, &efv->rx_list);
	efv->write_index++;
	spin_unlock_bh(&efv->rx_lock);
	/* Trigger rx work */
	if (primed)
		napi_schedule(&efv->napi);
}

struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport)
{
	struct efx_rep *efv, *out = NULL;

	/* spinlock guards against list mutation while we're walking it;
	 * but caller must also hold rcu_read_lock() to ensure the netdev
	 * isn't freed after we drop the spinlock.
	 */
	spin_lock_bh(&efx->vf_reps_lock);
	list_for_each_entry(efv, &efx->vf_reps, list)
		if (efv->mport == mport) {
			out = efv;
			break;
		}
	spin_unlock_bh(&efx->vf_reps_lock);
	return out;
}