// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include "net_driver.h"
#include "mcdi_port_common.h"
#include "mcdi_functions.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "tx_common.h"
#include "ef100_netdev.h"
#include "ef100_ethtool.h"
#include "nic_common.h"
#include "ef100_nic.h"
#include "ef100_tx.h"
#include "ef100_regs.h"
#include "mcdi_filters.h"
#include "rx_common.h"
#include "ef100_sriov.h"

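/* Keep efx->name in sync with the net device name, so that log and error
 * messages identify the right interface.
 */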
static void ef100_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
}

static int ef100_alloc_vis(struct efx_nic *efx, unsigned int *allocated_vis)
{
	/* EF100 uses a single TXQ per channel, as all checksum offloading
	 * is configured in the TX descriptor, and there is no TX Pacer for
	 * HIGHPRI queues.
	 */
	unsigned int tx_vis = efx->n_tx_channels + efx->n_extra_tx_channels;
	unsigned int rx_vis = efx->n_rx_channels;
	unsigned int min_vis, max_vis;

	EFX_WARN_ON_PARANOID(efx->tx_queues_per_channel != 1);

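	/* XDP transmit queues also need VIs */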
	tx_vis += efx->n_xdp_channels * efx->xdp_tx_per_channel;

	max_vis = max(rx_vis, tx_vis);
	/* Currently don't handle resource starvation and only accept
	 * our maximum needs and no less.
	 */
	min_vis = max_vis;

	return efx_mcdi_alloc_vis(efx, min_vis, max_vis,
				  NULL, allocated_vis);
}

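/* Extend the uncached mapping of the memory BAR so that it covers all of
 * the VIs we were allocated (max_vis * efx->vi_stride bytes, page aligned).
 */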
static int ef100_remap_bar(struct efx_nic *efx, int max_vis)
{
	unsigned int uc_mem_map_size;
	void __iomem *membase;

	efx->max_vis = max_vis;
	uc_mem_map_size = PAGE_ALIGN(max_vis * efx->vi_stride);

	/* Extend the original UC mapping of the memory BAR */
	membase = ioremap(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not extend memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int ef100_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	efx_detach_reps(efx);
	netif_stop_queue(net_dev);
	efx_stop_all(efx);
	efx_mcdi_mac_fini_stats(efx);
	efx_disable_interrupts(efx);
	efx_clear_interrupt_affinity(efx);
	efx_nic_fini_interrupt(efx);
	efx_remove_filters(efx);
	efx_fini_napi(efx);
	efx_remove_channels(efx);
	efx_mcdi_free_vis(efx);
	efx_remove_interrupts(efx);

	efx->state = STATE_NET_DOWN;

	return 0;
}

/* Context: process, rtnl_lock() held. */
static int ef100_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	unsigned int allocated_vis;
	int rc;

	ef100_update_name(efx);
	netif_dbg(efx, ifup, net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = efx_check_disabled(efx);
	if (rc)
		goto fail;

	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail;

	rc = efx_set_channels(efx);
	if (rc)
		goto fail;

	rc = efx_mcdi_free_vis(efx);
	if (rc)
		goto fail;

	rc = ef100_alloc_vis(efx, &allocated_vis);
	if (rc)
		goto fail;

	rc = efx_probe_channels(efx);
	if (rc)
		goto fail;

	rc = ef100_remap_bar(efx, allocated_vis);
	if (rc)
		goto fail;

	efx_init_napi(efx);

	rc = efx_probe_filters(efx);
	if (rc)
		goto fail;

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail;
	efx_set_interrupt_affinity(efx);

	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail;

	/* in case the MC rebooted while we were stopped, consume the change
	 * to the warm reboot count
	 */
	(void) efx_mcdi_poll_reboot(efx);

	rc = efx_mcdi_mac_init_stats(efx);
	if (rc)
		goto fail;

	efx_start_all(efx);

	/* Link state detection is normally event-driven; we have
	 * to poll now because we could have missed a change
	 */
	mutex_lock(&efx->mac_lock);
	if (efx_mcdi_phy_poll(efx))
		efx_link_status_changed(efx);
	mutex_unlock(&efx->mac_lock);

	efx->state = STATE_NET_UP;
	if (netif_running(efx->net_dev))
		efx_attach_reps(efx);

	return 0;

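/* Tear down whatever we managed to bring up, via the normal stop path */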
fail:
	ef100_net_stop(net_dev);
	return rc;
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
					 struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	return __ef100_hard_start_xmit(skb, efx, net_dev, NULL);
}

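/* Common transmit path, also used by the representor datapath.  @efv
 * identifies the representor on whose behalf we are transmitting, or is
 * NULL when the PF sends its own traffic.
 */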
netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
				    struct efx_nic *efx,
				    struct net_device *net_dev,
				    struct efx_rep *efv)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int rc;

	channel = efx_get_tx_channel(efx, skb_get_queue_mapping(skb));
	if (!efx->n_channels || !efx->n_tx_channels || !channel) {
		netif_stop_queue(net_dev);
		goto err;
	}

	netif_vdbg(efx, tx_queued, efx->net_dev,
		   "%s len %d data %d channel %d\n", __func__,
		   skb->len, skb->data_len, channel->channel);

	tx_queue = &channel->tx_queue[0];
	rc = __ef100_enqueue_skb(tx_queue, skb, efv);
	if (rc == 0)
		return NETDEV_TX_OK;

err:
	net_dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static const struct net_device_ops ef100_netdev_ops = {
	.ndo_open               = ef100_net_open,
	.ndo_stop               = ef100_net_stop,
	.ndo_start_xmit         = ef100_hard_start_xmit,
	.ndo_tx_timeout         = efx_watchdog,
	.ndo_get_stats64        = efx_net_stats,
	.ndo_change_mtu         = efx_change_mtu,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = efx_set_mac_address,
	.ndo_set_rx_mode        = efx_set_rx_mode, /* Lookout */
	.ndo_set_features       = efx_set_features,
	.ndo_get_phys_port_id   = efx_get_phys_port_id,
	.ndo_get_phys_port_name = efx_get_phys_port_name,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer      = efx_filter_rfs,
#endif
};

/*	Netdev registration
 */
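/* Notifier callback: on NETDEV_REGISTER or NETDEV_CHANGENAME for our own
 * net device, refresh efx->name so it tracks the current interface name.
 */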
int ef100_netdev_event(struct notifier_block *this,
		       unsigned long event, void *ptr)
{
	struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier);
	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

	if (efx->net_dev == net_dev &&
	    (event == NETDEV_CHANGENAME || event == NETDEV_REGISTER))
		ef100_update_name(efx);

	return NOTIFY_DONE;
}

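/* Fill in the net_device ops, MTU limits and ethtool ops, then register the
 * net device with the networking core.  Takes the RTNL lock internally.
 */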
static int ef100_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &ef100_netdev_ops;
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;
	net_dev->ethtool_ops = &ef100_ethtool_ops;

	rtnl_lock();

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	ef100_update_name(efx);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	efx->state = STATE_NET_DOWN;
	rtnl_unlock();
	efx_init_mcdi_logging(efx);

	return 0;

fail_locked:
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}

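/* Reverse of ef100_register_netdev(); only acts if the net device was
 * actually registered.
 */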
static void ef100_unregister_netdev(struct efx_nic *efx)
{
	if (efx_dev_registered(efx)) {
		efx_fini_mcdi_logging(efx);
		efx->state = STATE_PROBED;
		unregister_netdev(efx->net_dev);
	}
}

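/* Undo ef100_probe_netdev(): close the device, disable SR-IOV on the PF,
 * unregister and free the net device, and release filters and channels.
 */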
void ef100_remove_netdev(struct efx_probe_data *probe_data)
{
	struct efx_nic *efx = &probe_data->efx;

	if (!efx->net_dev)
		return;

	rtnl_lock();
	dev_close(efx->net_dev);
	rtnl_unlock();

	unregister_netdevice_notifier(&efx->netdev_notifier);
#ifdef CONFIG_SFC_SRIOV
	if (!efx->type->is_vf)
		efx_ef100_pci_sriov_disable(efx, true);
#endif

	ef100_unregister_netdev(efx);

#ifdef CONFIG_SFC_SRIOV
	efx_fini_tc(efx);
#endif

	down_write(&efx->filter_sem);
	efx_mcdi_filter_table_remove(efx);
	up_write(&efx->filter_sem);
	efx_fini_channels(efx);
	kfree(efx->phy_data);
	efx->phy_data = NULL;

	free_netdev(efx->net_dev);
	efx->net_dev = NULL;
	efx->state = STATE_PROBED;
}

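/* Allocate the net device for this function, probe the PHY, channels and
 * filter table, set up default RSS, and register with the networking core.
 */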
int ef100_probe_netdev(struct efx_probe_data *probe_data)
{
	struct efx_nic *efx = &probe_data->efx;
	struct efx_probe_data **probe_ptr;
	struct net_device *net_dev;
	int rc;

	if (efx->mcdi->fn_flags &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
		pci_info(efx->pci_dev, "No network port on this PCI function\n");
		return 0;
	}

	/* Allocate and initialise a struct net_device */
	net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	probe_ptr = netdev_priv(net_dev);
	*probe_ptr = probe_data;
	efx->net_dev = net_dev;
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);

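	/* Advertise the offload features supported by this NIC type */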
	net_dev->features |= efx->type->offload_features;
	net_dev->hw_features |= efx->type->offload_features;
	net_dev->hw_enc_features |= efx->type->offload_features;
	net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
				  NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
	netif_set_tso_max_segs(net_dev,
			       ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
	efx->mdio.dev = net_dev;

	rc = efx_ef100_init_datapath_caps(efx);
	if (rc < 0)
		goto fail;

	rc = ef100_phy_probe(efx);
	if (rc)
		goto fail;

	rc = efx_init_channels(efx);
	if (rc)
		goto fail;

	down_write(&efx->filter_sem);
	rc = ef100_filter_table_probe(efx);
	up_write(&efx->filter_sem);
	if (rc)
		goto fail;

	netdev_rss_key_fill(efx->rss_context.rx_hash_key,
			    sizeof(efx->rss_context.rx_hash_key));

	/* Don't fail init if RSS setup doesn't work. */
	efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);

	rc = ef100_register_netdev(efx);
	if (rc)
		goto fail;

	if (!efx->type->is_vf) {
		rc = ef100_probe_netdev_pf(efx);
		if (rc)
			goto fail;
	}

	efx->netdev_notifier.notifier_call = ef100_netdev_event;
	rc = register_netdevice_notifier(&efx->netdev_notifier);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to register netdevice notifier, rc=%d\n", rc);
		goto fail;
	}

	return 0;

fail:
	return rc;
}