// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 * Copyright 2020-2022 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/rhashtable.h>
#include "ef100_rep.h"
#include "ef100_netdev.h"
#include "ef100_nic.h"
#include "mae.h"
#include "rx_common.h"
#include "tc_bindings.h"

#define EFX_EF100_REP_DRIVER	"efx_ef100_rep"

#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE	64

static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);

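/* Initialise the software state of a representor: parent/index back-pointers,
 * the default-rule handle, the pseudo-RX list and lock, and the default
 * message level.
 */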
static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
				     unsigned int i)
{
	efv->parent = efx;
	efv->idx = i;
	INIT_LIST_HEAD(&efv->list);
	efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
	INIT_LIST_HEAD(&efv->dflt.acts.list);
	INIT_LIST_HEAD(&efv->rx_list);
	spin_lock_init(&efv->rx_lock);
	efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			  NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			  NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			  NETIF_MSG_TX_ERR | NETIF_MSG_HW;
	return 0;
}

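/* The representor has no real RX queues; packets are queued on rx_list by
 * efx_ef100_rep_rx_packet() and drained by a NAPI context, which open/stop
 * simply enable and disable.
 */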
static int efx_ef100_rep_open(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll);
	napi_enable(&efv->napi);
	return 0;
}

static int efx_ef100_rep_close(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	napi_disable(&efv->napi);
	netif_napi_del(&efv->napi);
	return 0;
}

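/* Transmit on behalf of the representee: count the attempt, then hand the
 * skb to the parent PF's TX path under the parent's TX lock (representors
 * are LLTX and share the parent's hardware TX path).
 */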
static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	netdev_tx_t rc;

	/* __ef100_hard_start_xmit() will always return success even in the
	 * case of TX drops, where it will increment efx's tx_dropped.  The
	 * efv stats really only count attempted TX, not success/failure.
	 */
	atomic64_inc(&efv->stats.tx_packets);
	atomic64_add(skb->len, &efv->stats.tx_bytes);
	netif_tx_lock(efx->net_dev);
	rc = __ef100_hard_start_xmit(skb, efx, dev, efv);
	netif_tx_unlock(efx->net_dev);
	return rc;
}

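/* Report the parent NIC's port_id as this representor's parent (switch) ID. */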
static int efx_ef100_rep_get_port_parent_id(struct net_device *dev,
					    struct netdev_phys_item_id *ppid)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	struct ef100_nic_data *nic_data;

	nic_data = efx->nic_data;
	/* nic_data->port_id is a u8[] */
	ppid->id_len = sizeof(nic_data->port_id);
	memcpy(ppid->id, nic_data->port_id, sizeof(nic_data->port_id));
	return 0;
}

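/* Representor phys port names take the form p<port>pf<pf>vf<vf>,
 * e.g. "p0pf0vf1" for the second VF on PF 0 of port 0.
 */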
static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
					    char *buf, size_t len)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	struct ef100_nic_data *nic_data;
	int ret;

	nic_data = efx->nic_data;
	ret = snprintf(buf, len, "p%upf%uvf%u", efx->port_num,
		       nic_data->pf_index, efv->idx);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

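/* TC offload for a representor (flower rules and block bindings) is handled
 * by the parent PF's TC code, which programs the MAE.
 */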
static int efx_ef100_rep_setup_tc(struct net_device *net_dev,
				  enum tc_setup_type type, void *type_data)
{
	struct efx_rep *efv = netdev_priv(net_dev);
	struct efx_nic *efx = efv->parent;

	if (type == TC_SETUP_CLSFLOWER)
		return efx_tc_flower(efx, net_dev, type_data, efv);
	if (type == TC_SETUP_BLOCK)
		return efx_tc_setup_block(net_dev, efx, type_data, efv);

	return -EOPNOTSUPP;
}

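/* Representor stats are software-only atomic64 counters maintained by the
 * pseudo RX/TX paths above.
 */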
static void efx_ef100_rep_get_stats64(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	struct efx_rep *efv = netdev_priv(dev);

	stats->rx_packets = atomic64_read(&efv->stats.rx_packets);
	stats->tx_packets = atomic64_read(&efv->stats.tx_packets);
	stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes);
	stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes);
	stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped);
	stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
}

const struct net_device_ops efx_ef100_rep_netdev_ops = {
	.ndo_open		= efx_ef100_rep_open,
	.ndo_stop		= efx_ef100_rep_close,
	.ndo_start_xmit		= efx_ef100_rep_xmit,
	.ndo_get_port_parent_id	= efx_ef100_rep_get_port_parent_id,
	.ndo_get_phys_port_name	= efx_ef100_rep_get_phys_port_name,
	.ndo_get_stats64	= efx_ef100_rep_get_stats64,
	.ndo_setup_tc		= efx_ef100_rep_setup_tc,
};

static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, EFX_EF100_REP_DRIVER, sizeof(drvinfo->driver));
}

static u32 efx_ef100_rep_ethtool_get_msglevel(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	return efv->msg_enable;
}

static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
					       u32 msg_enable)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	efv->msg_enable = msg_enable;
}

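/* There is no hardware RX ring behind a representor; rx_pring_size is just
 * a soft cap on the number of SKBs allowed to queue up on rx_list (see
 * efx_ef100_rep_rx_packet()).
 */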
static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev,
						struct ethtool_ringparam *ring,
						struct kernel_ethtool_ringparam *kring,
						struct netlink_ext_ack *ext_ack)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	ring->rx_max_pending = U32_MAX;
	ring->rx_pending = efv->rx_pring_size;
}

static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev,
					       struct ethtool_ringparam *ring,
					       struct kernel_ethtool_ringparam *kring,
					       struct netlink_ext_ack *ext_ack)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending)
		return -EINVAL;

	efv->rx_pring_size = ring->rx_pending;
	return 0;
}

static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
	.get_drvinfo		= efx_ef100_rep_get_drvinfo,
	.get_msglevel		= efx_ef100_rep_ethtool_get_msglevel,
	.set_msglevel		= efx_ef100_rep_ethtool_set_msglevel,
	.get_ringparam		= efx_ef100_rep_ethtool_get_ringparam,
	.set_ringparam		= efx_ef100_rep_ethtool_set_ringparam,
};

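/* Allocate and initialise a representor netdev for VF index i and link it
 * onto the parent's vf_reps list.  Registration with the networking core is
 * done later, in efx_ef100_vfrep_create().
 */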
static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
						   unsigned int i)
{
	struct net_device *net_dev;
	struct efx_rep *efv;
	int rc;

	net_dev = alloc_etherdev_mq(sizeof(*efv), 1);
	if (!net_dev)
		return ERR_PTR(-ENOMEM);

	efv = netdev_priv(net_dev);
	rc = efx_ef100_rep_init_struct(efx, efv, i);
	if (rc)
		goto fail1;
	efv->net_dev = net_dev;
	rtnl_lock();
	spin_lock_bh(&efx->vf_reps_lock);
	list_add_tail(&efv->list, &efx->vf_reps);
	spin_unlock_bh(&efx->vf_reps_lock);
	if (netif_running(efx->net_dev) && efx->state == STATE_NET_UP) {
		netif_device_attach(net_dev);
		netif_carrier_on(net_dev);
	} else {
		netif_carrier_off(net_dev);
		netif_tx_stop_all_queues(net_dev);
	}
	rtnl_unlock();

	net_dev->netdev_ops = &efx_ef100_rep_netdev_ops;
	net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops;
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;
	net_dev->features |= NETIF_F_LLTX;
	net_dev->hw_features |= NETIF_F_LLTX;
	return efv;
fail1:
	free_netdev(net_dev);
	return ERR_PTR(rc);
}

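/* Look up the m-port ID of the representee VF and install this representor's
 * default MAE rule.
 */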
static int efx_ef100_configure_rep(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;
	u32 selector;
	int rc;

	efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
	/* Construct mport selector for corresponding VF */
	efx_mae_mport_vf(efx, efv->idx, &selector);
	/* Look up actual mport ID */
	rc = efx_mae_lookup_mport(efx, selector, &efv->mport);
	if (rc)
		return rc;
	pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport);
	/* mport label should fit in 16 bits */
	WARN_ON(efv->mport >> 16);

	return efx_tc_configure_default_rule_rep(efv);
}

static void efx_ef100_deconfigure_rep(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;

	efx_tc_deconfigure_default_rule(efx, &efv->dflt);
}

static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;

	rtnl_lock();
	spin_lock_bh(&efx->vf_reps_lock);
	list_del(&efv->list);
	spin_unlock_bh(&efx->vf_reps_lock);
	rtnl_unlock();
	synchronize_rcu();
	free_netdev(efv->net_dev);
}

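/* Create, configure and register the representor for VF index i. */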
int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
{
	struct efx_rep *efv;
	int rc;

	efv = efx_ef100_rep_create_netdev(efx, i);
	if (IS_ERR(efv)) {
		rc = PTR_ERR(efv);
		pci_err(efx->pci_dev,
			"Failed to create representor for VF %d, rc %d\n", i,
			rc);
		return rc;
	}
	rc = efx_ef100_configure_rep(efv);
	if (rc) {
		pci_err(efx->pci_dev,
			"Failed to configure representor for VF %d, rc %d\n",
			i, rc);
		goto fail1;
	}
	rc = register_netdev(efv->net_dev);
	if (rc) {
		pci_err(efx->pci_dev,
			"Failed to register representor for VF %d, rc %d\n",
			i, rc);
		goto fail2;
	}
	pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
		efv->net_dev->name);
	return 0;
fail2:
	efx_ef100_deconfigure_rep(efv);
fail1:
	efx_ef100_rep_destroy_netdev(efv);
	return rc;
}

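/* Unregister and tear down a single VF representor.  A no-op if its netdev
 * was never created.
 */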
void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
{
	struct net_device *rep_dev;

	rep_dev = efv->net_dev;
	if (!rep_dev)
		return;
	netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
	unregister_netdev(rep_dev);
	efx_ef100_deconfigure_rep(efv);
	efx_ef100_rep_destroy_netdev(efv);
}

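/* Tear down all remaining VF representors; only relevant when this function
 * has MAE privilege (grp_mae).
 */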
void efx_ef100_fini_vfreps(struct efx_nic *efx)
{
	struct ef100_nic_data *nic_data = efx->nic_data;
	struct efx_rep *efv, *next;

	if (!nic_data->grp_mae)
		return;

	list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
		efx_ef100_vfrep_destroy(efx, efv);
}

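/* Enumerate the MAE's m-ports at probe time and build the m-port table.
 * Failure is non-fatal; it typically means we are not the MAE admin
 * function.
 */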
void efx_ef100_init_reps(struct efx_nic *efx)
{
	struct ef100_nic_data *nic_data = efx->nic_data;
	int rc;

	nic_data->have_local_intf = false;
	rc = efx_mae_enumerate_mports(efx);
	if (rc)
		pci_warn(efx->pci_dev,
			 "Could not enumerate mports (rc=%d), are we admin?\n",
			 rc);
}

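/* Free the m-port table built during efx_ef100_init_reps(). */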
void efx_ef100_fini_reps(struct efx_nic *efx)
{
	struct efx_mae *mae = efx->mae;

	rhashtable_free_and_destroy(&mae->mports_ht, efx_mae_remove_mport,
				    NULL);
}

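/* NAPI poll for the pseudo-RX path: move up to 'weight' SKBs from rx_list
 * onto a local list under the lock, then deliver them outside it.
 */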
static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
{
	struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
	unsigned int read_index;
	struct list_head head;
	struct sk_buff *skb;
	bool need_resched;
	int spent = 0;

	INIT_LIST_HEAD(&head);
	/* Grab up to 'weight' pending SKBs */
	spin_lock_bh(&efv->rx_lock);
	read_index = efv->write_index;
	while (spent < weight && !list_empty(&efv->rx_list)) {
		skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
		list_del(&skb->list);
		list_add_tail(&skb->list, &head);
		spent++;
	}
	spin_unlock_bh(&efv->rx_lock);
	/* Receive them */
	netif_receive_skb_list(&head);
	if (spent < weight)
		if (napi_complete_done(napi, spent)) {
			spin_lock_bh(&efv->rx_lock);
			efv->read_index = read_index;
			/* If write_index advanced while we were doing the
			 * RX, then storing our read_index won't re-prime the
			 * fake-interrupt.  In that case, we need to schedule
			 * NAPI again to consume the additional packet(s).
			 */
			need_resched = efv->write_index != read_index;
			spin_unlock_bh(&efv->rx_lock);
			if (need_resched)
				napi_schedule(&efv->napi);
		}
	return spent;
}

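/* Called from the parent PF's RX path with a packet destined for this
 * representor: copy it into a fresh SKB, queue that on rx_list and kick
 * NAPI if the list was previously empty.
 */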
void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
{
	u8 *eh = efx_rx_buf_va(rx_buf);
	struct sk_buff *skb;
	bool primed;

	/* Don't allow too many queued SKBs to build up, as they consume
	 * GFP_ATOMIC memory.  If we overrun, just start dropping.
	 */
	if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "nodesc-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}

	skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
	if (!skb) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "noskb-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}
	memcpy(skb->data, eh, rx_buf->len);
	__skb_put(skb, rx_buf->len);

	skb_record_rx_queue(skb, 0); /* rep is single-queue */

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efv->net_dev);

	skb_checksum_none_assert(skb);

	atomic64_inc(&efv->stats.rx_packets);
	atomic64_add(rx_buf->len, &efv->stats.rx_bytes);

	/* Add it to the rx list */
	spin_lock_bh(&efv->rx_lock);
	primed = efv->read_index == efv->write_index;
	list_add_tail(&skb->list, &efv->rx_list);
	efv->write_index++;
	spin_unlock_bh(&efv->rx_lock);
	/* Trigger rx work */
	if (primed)
		napi_schedule(&efv->napi);
}

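/* Find the representor, if any, whose m-port label matches mport. */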
struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport)
{
	struct efx_rep *efv, *out = NULL;

	/* spinlock guards against list mutation while we're walking it;
	 * but caller must also hold rcu_read_lock() to ensure the netdev
	 * isn't freed after we drop the spinlock.
	 */
	spin_lock_bh(&efx->vf_reps_lock);
	list_for_each_entry(efv, &efx->vf_reps, list)
		if (efv->mport == mport) {
			out = efv;
			break;
		}
	spin_unlock_bh(&efx->vf_reps_lock);
	return out;
}