// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include "net_driver.h"
#include "mcdi_port_common.h"
#include "mcdi_functions.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "tx_common.h"
#include "ef100_netdev.h"
#include "ef100_ethtool.h"
#include "nic_common.h"
#include "ef100_nic.h"
#include "ef100_tx.h"
#include "ef100_regs.h"
#include "mcdi_filters.h"
#include "rx_common.h"
#include "ef100_sriov.h"
#include "tc_bindings.h"
#include "efx_devlink.h"

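/* Keep efx->name (used in log messages and IRQ names) in sync with the
 * netdev name; called at registration time and from the netdev notifier
 * below when the interface is renamed.
 */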
static void ef100_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
}

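/* On EF100 a VI (virtual interface) provides the doorbell/descriptor
 * resources for one RX queue and one TX queue, so the number of VIs we
 * request is the larger of the RX and TX queue counts.
 */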
static int ef100_alloc_vis(struct efx_nic *efx, unsigned int *allocated_vis)
{
	/* EF100 uses a single TXQ per channel, as all checksum offloading
	 * is configured in the TX descriptor, and there is no TX Pacer for
	 * HIGHPRI queues.
	 */
	unsigned int tx_vis = efx->n_tx_channels + efx->n_extra_tx_channels;
	unsigned int rx_vis = efx->n_rx_channels;
	unsigned int min_vis, max_vis;
	int rc;

	EFX_WARN_ON_PARANOID(efx->tx_queues_per_channel != 1);

	tx_vis += efx->n_xdp_channels * efx->xdp_tx_per_channel;

	max_vis = max(rx_vis, tx_vis);
	/* We require at least a single complete TX channel worth of queues. */
	min_vis = efx->tx_queues_per_channel;

	rc = efx_mcdi_alloc_vis(efx, min_vis, max_vis,
				NULL, allocated_vis);

	/* If we could not allocate the maximum number of VIs, return -EAGAIN
	 * so that the caller retries after reallocating channels.
	 */
	if (!rc && *allocated_vis < max_vis)
		rc = -EAGAIN;

	return rc;
}

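/* Called after VI allocation: the uncached mapping of the memory BAR must be
 * large enough to reach the doorbell region of every allocated VI (one
 * vi_stride-sized region per VI), so remap it to the required size.
 */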
static int ef100_remap_bar(struct efx_nic *efx, int max_vis)
{
	unsigned int uc_mem_map_size;
	void __iomem *membase;

	efx->max_vis = max_vis;
	uc_mem_map_size = PAGE_ALIGN(max_vis * efx->vi_stride);

	/* Extend the original UC mapping of the memory BAR */
	membase = ioremap(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not extend memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;
	return 0;
}

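/* ef100_net_stop() is also used as the unwind path when ef100_net_open()
 * fails part way through, so the teardown helpers called here are expected
 * to cope with partially initialised state.
 */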
/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be void.
 */
static int ef100_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	efx_detach_reps(efx);
	netif_stop_queue(net_dev);
	efx_stop_all(efx);
	efx_mcdi_mac_fini_stats(efx);
	efx_disable_interrupts(efx);
	efx_clear_interrupt_affinity(efx);
	efx_nic_fini_interrupt(efx);
	efx_remove_filters(efx);
	efx_fini_napi(efx);
	efx_remove_channels(efx);
	efx_mcdi_free_vis(efx);
	efx_remove_interrupts(efx);

	efx->state = STATE_NET_DOWN;

	return 0;
}

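/* All channel, interrupt and VI setup for EF100 is done here at open time
 * and torn down again in ef100_net_stop(), rather than at PCI probe time,
 * so the resource footprint follows the interface's up/down state.
 */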
/* Context: process, rtnl_lock() held. */
static int ef100_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	unsigned int allocated_vis;
	int rc;

	ef100_update_name(efx);
	netif_dbg(efx, ifup, net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = efx_check_disabled(efx);
	if (rc)
		goto fail;

	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail;

	rc = efx_set_channels(efx);
	if (rc)
		goto fail;

	rc = efx_mcdi_free_vis(efx);
	if (rc)
		goto fail;

	rc = ef100_alloc_vis(efx, &allocated_vis);
	if (rc && rc != -EAGAIN)
		goto fail;

	/* Try one more time with the maximum number of channels clamped to
	 * the number of VIs we did get, which is more likely to succeed.
	 */
	if (rc == -EAGAIN) {
		rc = efx_mcdi_free_vis(efx);
		if (rc)
			goto fail;

		efx_remove_interrupts(efx);
		efx->max_channels = allocated_vis;

		rc = efx_probe_interrupts(efx);
		if (rc)
			goto fail;

		rc = efx_set_channels(efx);
		if (rc)
			goto fail;

		rc = ef100_alloc_vis(efx, &allocated_vis);
		if (rc && rc != -EAGAIN)
			goto fail;

		/* It is very unlikely that we will fail here a second time,
		 * but if we do, return -ENOSPC.
		 */
		if (rc == -EAGAIN) {
			rc = -ENOSPC;
			goto fail;
		}
	}

	rc = efx_probe_channels(efx);
	if (rc)
		return rc;

	rc = ef100_remap_bar(efx, allocated_vis);
	if (rc)
		goto fail;

	efx_init_napi(efx);

	rc = efx_probe_filters(efx);
	if (rc)
		goto fail;

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail;
	efx_set_interrupt_affinity(efx);

	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail;

	/* In case the MC rebooted while we were stopped, consume the change
	 * to the warm reboot count.
	 */
	(void) efx_mcdi_poll_reboot(efx);

	rc = efx_mcdi_mac_init_stats(efx);
	if (rc)
		goto fail;

	efx_start_all(efx);

	/* Link state detection is normally event-driven; we have
	 * to poll now because we could have missed a change.
	 */
	mutex_lock(&efx->mac_lock);
	if (efx_mcdi_phy_poll(efx))
		efx_link_status_changed(efx);
	mutex_unlock(&efx->mac_lock);

	efx->state = STATE_NET_UP;
	if (netif_running(efx->net_dev))
		efx_attach_reps(efx);

	return 0;

fail:
	ef100_net_stop(net_dev);
	return rc;
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
					 struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	return __ef100_hard_start_xmit(skb, efx, net_dev, NULL);
}

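/* Common TX entry point, shared with the port representor datapath.  efv is
 * NULL for traffic from our own netdev; representor code (ef100_rep.c) passes
 * its own struct efx_rep so that __ef100_enqueue_skb() can steer the packet
 * to the representee instead of the wire, roughly (a sketch, not the exact
 * upstream call site):
 *
 *	__ef100_hard_start_xmit(skb, efv->parent, rep_dev, efv);
 */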
netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
				    struct efx_nic *efx,
				    struct net_device *net_dev,
				    struct efx_rep *efv)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int rc;

	channel = efx_get_tx_channel(efx, skb_get_queue_mapping(skb));
	netif_vdbg(efx, tx_queued, efx->net_dev,
		   "%s len %d data %d channel %d\n", __func__,
		   skb->len, skb->data_len, channel->channel);
	if (!efx->n_channels || !efx->n_tx_channels || !channel) {
		netif_stop_queue(net_dev);
		dev_kfree_skb_any(skb);
		goto err;
	}

	tx_queue = &channel->tx_queue[0];
	rc = __ef100_enqueue_skb(tx_queue, skb, efv);
	if (rc == 0)
		return NETDEV_TX_OK;

err:
	net_dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static const struct net_device_ops ef100_netdev_ops = {
	.ndo_open               = ef100_net_open,
	.ndo_stop               = ef100_net_stop,
	.ndo_start_xmit         = ef100_hard_start_xmit,
	.ndo_tx_timeout         = efx_watchdog,
	.ndo_get_stats64        = efx_net_stats,
	.ndo_change_mtu         = efx_change_mtu,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = efx_set_mac_address,
	.ndo_set_rx_mode        = efx_set_rx_mode, /* Lookout */
	.ndo_set_features       = efx_set_features,
	.ndo_get_phys_port_id   = efx_get_phys_port_id,
	.ndo_get_phys_port_name = efx_get_phys_port_name,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer      = efx_filter_rfs,
#endif
#ifdef CONFIG_SFC_SRIOV
	.ndo_setup_tc		= efx_tc_setup,
#endif
};

/*	Netdev registration
 */
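/* The netdev notifier lets us catch NETDEV_CHANGENAME (e.g. a rename by udev
 * or the administrator) as well as initial registration, so that efx->name
 * stays in sync with the interface name.
 */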
int ef100_netdev_event(struct notifier_block *this,
		       unsigned long event, void *ptr)
{
	struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier);
	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

	if (efx->net_dev == net_dev &&
	    (event == NETDEV_CHANGENAME || event == NETDEV_REGISTER))
		ef100_update_name(efx);

	return NOTIFY_DONE;
}

static int ef100_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &ef100_netdev_ops;
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;
	net_dev->ethtool_ops = &ef100_ethtool_ops;

	rtnl_lock();

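	/* Resolve any "eth%d"-style name template now so that
	 * ef100_update_name() copies the final name before the device is
	 * registered.
	 */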
	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	ef100_update_name(efx);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	efx->state = STATE_NET_DOWN;
	rtnl_unlock();
	efx_init_mcdi_logging(efx);

	return 0;

fail_locked:
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}

static void ef100_unregister_netdev(struct efx_nic *efx)
{
	if (efx_dev_registered(efx)) {
		efx_fini_mcdi_logging(efx);
		efx->state = STATE_PROBED;
		unregister_netdev(efx->net_dev);
	}
}

void ef100_remove_netdev(struct efx_probe_data *probe_data)
{
	struct efx_nic *efx = &probe_data->efx;

	if (!efx->net_dev)
		return;

	rtnl_lock();
	dev_close(efx->net_dev);
	rtnl_unlock();

	unregister_netdevice_notifier(&efx->netdev_notifier);
#if defined(CONFIG_SFC_SRIOV)
	if (!efx->type->is_vf)
		efx_ef100_pci_sriov_disable(efx, true);
#endif

	efx_fini_devlink_lock(efx);
	ef100_unregister_netdev(efx);

#ifdef CONFIG_SFC_SRIOV
	ef100_pf_unset_devlink_port(efx);
	efx_fini_tc(efx);
#endif

	down_write(&efx->filter_sem);
	efx_mcdi_filter_table_remove(efx);
	up_write(&efx->filter_sem);
	efx_fini_channels(efx);
	kfree(efx->phy_data);
	efx->phy_data = NULL;

	efx_fini_devlink_and_unlock(efx);

	free_netdev(efx->net_dev);
	efx->net_dev = NULL;
	efx->state = STATE_PROBED;
}

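/* Called from the PCI probe path (see ef100.c) once NIC-level resources
 * exist; ef100_remove_netdev() above undoes what is set up here.
 */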
int ef100_probe_netdev(struct efx_probe_data *probe_data)
{
	struct efx_nic *efx = &probe_data->efx;
	struct efx_probe_data **probe_ptr;
	struct ef100_nic_data *nic_data;
	struct net_device *net_dev;
	int rc;

	if (efx->mcdi->fn_flags &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
		pci_info(efx->pci_dev, "No network port on this PCI function");
		return 0;
	}

	/* Allocate and initialise a struct net_device */
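	/* The private area of the net_device stores only a pointer back to
	 * probe_data, hence sizeof(probe_data) (the size of the pointer)
	 * rather than sizeof(*probe_data).
	 */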
	net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	probe_ptr = netdev_priv(net_dev);
	*probe_ptr = probe_data;
	efx->net_dev = net_dev;
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);

	/* enable all supported features except rx-fcs and rx-all */
	net_dev->features |= efx->type->offload_features &
			     ~(NETIF_F_RXFCS | NETIF_F_RXALL);
	net_dev->hw_features |= efx->type->offload_features;
	net_dev->hw_enc_features |= efx->type->offload_features;
	net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
				  NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
	netif_set_tso_max_segs(net_dev,
			       ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
	efx->mdio.dev = net_dev;

	rc = efx_ef100_init_datapath_caps(efx);
	if (rc < 0)
		goto fail;

	rc = ef100_phy_probe(efx);
	if (rc)
		goto fail;

	rc = efx_init_channels(efx);
	if (rc)
		goto fail;

	down_write(&efx->filter_sem);
	rc = ef100_filter_table_probe(efx);
	up_write(&efx->filter_sem);
	if (rc)
		goto fail;

	netdev_rss_key_fill(efx->rss_context.rx_hash_key,
			    sizeof(efx->rss_context.rx_hash_key));

	/* Don't fail init if RSS setup doesn't work. */
	efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);

	nic_data = efx->nic_data;
	rc = ef100_get_mac_address(efx, net_dev->perm_addr, CLIENT_HANDLE_SELF,
				   efx->type->is_vf);
	if (rc)
		return rc;
	/* Assign MAC address */
	eth_hw_addr_set(net_dev, net_dev->perm_addr);
	ether_addr_copy(nic_data->port_id, net_dev->perm_addr);

	/* devlink creation, registration and lock */
	rc = efx_probe_devlink_and_lock(efx);
	if (rc)
		pci_info(efx->pci_dev, "devlink registration failed");

	rc = ef100_register_netdev(efx);
	if (rc)
		goto fail;

	if (!efx->type->is_vf) {
		rc = ef100_probe_netdev_pf(efx);
		if (rc)
			goto fail;
#ifdef CONFIG_SFC_SRIOV
		ef100_pf_set_devlink_port(efx);
#endif
	}

	efx->netdev_notifier.notifier_call = ef100_netdev_event;
	rc = register_netdevice_notifier(&efx->netdev_notifier);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to register netdevice notifier, rc=%d\n", rc);
		goto fail;
	}

	efx_probe_devlink_unlock(efx);
	return rc;
fail:
#ifdef CONFIG_SFC_SRIOV
	/* remove the devlink port if it exists */
	ef100_pf_unset_devlink_port(efx);
#endif
	efx_probe_devlink_unlock(efx);
	return rc;
}