// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include "net_driver.h"
#include "mcdi_port_common.h"
#include "mcdi_functions.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "tx_common.h"
#include "ef100_netdev.h"
#include "ef100_ethtool.h"
#include "nic_common.h"
#include "ef100_nic.h"
#include "ef100_tx.h"
#include "ef100_regs.h"
#include "mcdi_filters.h"
#include "rx_common.h"
#include "ef100_sriov.h"
#include "tc_bindings.h"

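/* Keep the driver's cached copy of the interface name (efx->name) in step
 * with the kernel's current name for the net device.
 */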
static void ef100_update_name(struct efx_nic *efx)
{
	strscpy(efx->name, efx->net_dev->name, sizeof(efx->name));
}

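/* Work out how many VIs (virtual interfaces) to request from the firmware:
 * one per RX queue and one per TX queue, including XDP TX queues.
 */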
static int ef100_alloc_vis(struct efx_nic *efx, unsigned int *allocated_vis)
{
	/* EF100 uses a single TXQ per channel, as all checksum offloading
	 * is configured in the TX descriptor, and there is no TX Pacer for
	 * HIGHPRI queues.
	 */
	unsigned int tx_vis = efx->n_tx_channels + efx->n_extra_tx_channels;
	unsigned int rx_vis = efx->n_rx_channels;
	unsigned int min_vis, max_vis;

	EFX_WARN_ON_PARANOID(efx->tx_queues_per_channel != 1);

	tx_vis += efx->n_xdp_channels * efx->xdp_tx_per_channel;

	max_vis = max(rx_vis, tx_vis);
	/* Currently don't handle resource starvation and only accept
	 * our maximum needs and no less.
	 */
	min_vis = max_vis;

	return efx_mcdi_alloc_vis(efx, min_vis, max_vis,
				  NULL, allocated_vis);
}

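/* Extend the uncached mapping of the memory BAR so that it covers every VI
 * we were given; each VI occupies efx->vi_stride bytes of BAR space.
 */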
static int ef100_remap_bar(struct efx_nic *efx, int max_vis)
{
	unsigned int uc_mem_map_size;
	void __iomem *membase;

	efx->max_vis = max_vis;
	uc_mem_map_size = PAGE_ALIGN(max_vis * efx->vi_stride);

	/* Extend the original UC mapping of the memory BAR */
	membase = ioremap(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not extend memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int ef100_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

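	/* Tear the datapath down in roughly the reverse order to
	 * ef100_net_open(); this is also used as the unwind path when
	 * open fails part-way through.
	 */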
	efx_detach_reps(efx);
	netif_stop_queue(net_dev);
	efx_stop_all(efx);
	efx_mcdi_mac_fini_stats(efx);
	efx_disable_interrupts(efx);
	efx_clear_interrupt_affinity(efx);
	efx_nic_fini_interrupt(efx);
	efx_remove_filters(efx);
	efx_fini_napi(efx);
	efx_remove_channels(efx);
	efx_mcdi_free_vis(efx);
	efx_remove_interrupts(efx);

	efx->state = STATE_NET_DOWN;

	return 0;
}

/* Context: process, rtnl_lock() held. */
static int ef100_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	unsigned int allocated_vis;
	int rc;

	ef100_update_name(efx);
	netif_dbg(efx, ifup, net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = efx_check_disabled(efx);
	if (rc)
		goto fail;

	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail;

	rc = efx_set_channels(efx);
	if (rc)
		goto fail;

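	/* Drop any VIs that may still be allocated (for example after an
	 * earlier open attempt that failed part-way), then request a fresh
	 * set sized for the channel configuration chosen above.
	 */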
	rc = efx_mcdi_free_vis(efx);
	if (rc)
		goto fail;

	rc = ef100_alloc_vis(efx, &allocated_vis);
	if (rc)
		goto fail;

	rc = efx_probe_channels(efx);
	if (rc)
		goto fail;

	rc = ef100_remap_bar(efx, allocated_vis);
	if (rc)
		goto fail;

	efx_init_napi(efx);

	rc = efx_probe_filters(efx);
	if (rc)
		goto fail;

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail;
	efx_set_interrupt_affinity(efx);

	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail;

	/* in case the MC rebooted while we were stopped, consume the change
	 * to the warm reboot count
	 */
	(void) efx_mcdi_poll_reboot(efx);

	rc = efx_mcdi_mac_init_stats(efx);
	if (rc)
		goto fail;

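	/* All resources are in place; bring up the datapath (queues, port
	 * and periodic monitoring).
	 */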
	efx_start_all(efx);

	/* Link state detection is normally event-driven; we have
	 * to poll now because we could have missed a change
	 */
	mutex_lock(&efx->mac_lock);
	if (efx_mcdi_phy_poll(efx))
		efx_link_status_changed(efx);
	mutex_unlock(&efx->mac_lock);

	efx->state = STATE_NET_UP;
	if (netif_running(efx->net_dev))
		efx_attach_reps(efx);

	return 0;

fail:
	ef100_net_stop(net_dev);
	return rc;
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
					 struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	return __ef100_hard_start_xmit(skb, efx, net_dev, NULL);
}

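/* Common TX entry point, shared with the port-representor datapath:
 * efv is NULL for ordinary traffic from this netdev and identifies the
 * port representor otherwise.
 */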
netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
				    struct efx_nic *efx,
				    struct net_device *net_dev,
				    struct efx_rep *efv)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int rc;

	channel = efx_get_tx_channel(efx, skb_get_queue_mapping(skb));
	if (!efx->n_channels || !efx->n_tx_channels || !channel) {
		netif_stop_queue(net_dev);
		dev_kfree_skb_any(skb);
		goto err;
	}
	netif_vdbg(efx, tx_queued, efx->net_dev,
		   "%s len %d data %d channel %d\n", __func__,
		   skb->len, skb->data_len, channel->channel);

	tx_queue = &channel->tx_queue[0];
	rc = __ef100_enqueue_skb(tx_queue, skb, efv);
	if (rc == 0)
		return NETDEV_TX_OK;

err:
	net_dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

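/* Entry points used by the network core for EF100 net devices. */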
static const struct net_device_ops ef100_netdev_ops = {
	.ndo_open               = ef100_net_open,
	.ndo_stop               = ef100_net_stop,
	.ndo_start_xmit         = ef100_hard_start_xmit,
	.ndo_tx_timeout         = efx_watchdog,
	.ndo_get_stats64        = efx_net_stats,
	.ndo_change_mtu         = efx_change_mtu,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = efx_set_mac_address,
	.ndo_set_rx_mode        = efx_set_rx_mode, /* Lookout */
	.ndo_set_features       = efx_set_features,
	.ndo_get_phys_port_id   = efx_get_phys_port_id,
	.ndo_get_phys_port_name = efx_get_phys_port_name,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer      = efx_filter_rfs,
#endif
#ifdef CONFIG_SFC_SRIOV
	.ndo_setup_tc		= efx_tc_setup,
#endif
};

/*	Netdev registration
 */
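/* This notifier keeps efx->name in step with the interface name: it fires
 * on the initial NETDEV_REGISTER and on any later NETDEV_CHANGENAME
 * (e.g. a userspace rename).
 */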
int ef100_netdev_event(struct notifier_block *this,
		       unsigned long event, void *ptr)
{
	struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier);
	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

	if (efx->net_dev == net_dev &&
	    (event == NETDEV_CHANGENAME || event == NETDEV_REGISTER))
		ef100_update_name(efx);

	return NOTIFY_DONE;
}

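/* Finish populating the net_device (ops, MTU limits, ethtool ops) and
 * register it with the networking core.  The interface starts
 * administratively down; carrier is reported once PHY events detect the
 * link.
 */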
static int ef100_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &ef100_netdev_ops;
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;
	net_dev->ethtool_ops = &ef100_ethtool_ops;

	rtnl_lock();

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	ef100_update_name(efx);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	efx->state = STATE_NET_DOWN;
	rtnl_unlock();
	efx_init_mcdi_logging(efx);

	return 0;

fail_locked:
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}

static void ef100_unregister_netdev(struct efx_nic *efx)
{
	if (efx_dev_registered(efx)) {
		efx_fini_mcdi_logging(efx);
		efx->state = STATE_PROBED;
		unregister_netdev(efx->net_dev);
	}
}

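/* Undo ef100_probe_netdev(): close the interface, drop the notifier and
 * SR-IOV/TC state, unregister, and free the net_device.
 */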
void ef100_remove_netdev(struct efx_probe_data *probe_data)
{
	struct efx_nic *efx = &probe_data->efx;

	if (!efx->net_dev)
		return;

	rtnl_lock();
	dev_close(efx->net_dev);
	rtnl_unlock();

	unregister_netdevice_notifier(&efx->netdev_notifier);
#if defined(CONFIG_SFC_SRIOV)
	if (!efx->type->is_vf)
		efx_ef100_pci_sriov_disable(efx, true);
#endif

	ef100_unregister_netdev(efx);

#ifdef CONFIG_SFC_SRIOV
	efx_fini_tc(efx);
#endif

	down_write(&efx->filter_sem);
	efx_mcdi_filter_table_remove(efx);
	up_write(&efx->filter_sem);
	efx_fini_channels(efx);
	kfree(efx->phy_data);
	efx->phy_data = NULL;

	free_netdev(efx->net_dev);
	efx->net_dev = NULL;
	efx->state = STATE_PROBED;
}

int ef100_probe_netdev(struct efx_probe_data *probe_data)
{
	struct efx_nic *efx = &probe_data->efx;
	struct efx_probe_data **probe_ptr;
	struct net_device *net_dev;
	int rc;

	if (efx->mcdi->fn_flags &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
		pci_info(efx->pci_dev, "No network port on this PCI function");
		return 0;
	}

	/* Allocate and initialise a struct net_device */
	net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	probe_ptr = netdev_priv(net_dev);
	*probe_ptr = probe_data;
	efx->net_dev = net_dev;
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);

	net_dev->features |= efx->type->offload_features;
	net_dev->hw_features |= efx->type->offload_features;
	net_dev->hw_enc_features |= efx->type->offload_features;
	net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
				  NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
	netif_set_tso_max_segs(net_dev,
			       ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
	efx->mdio.dev = net_dev;

	rc = efx_ef100_init_datapath_caps(efx);
	if (rc < 0)
		goto fail;

	rc = ef100_phy_probe(efx);
	if (rc)
		goto fail;

	rc = efx_init_channels(efx);
	if (rc)
		goto fail;

	down_write(&efx->filter_sem);
	rc = ef100_filter_table_probe(efx);
	up_write(&efx->filter_sem);
	if (rc)
		goto fail;

	netdev_rss_key_fill(efx->rss_context.rx_hash_key,
			    sizeof(efx->rss_context.rx_hash_key));

	/* Don't fail init if RSS setup doesn't work. */
	efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);

	rc = ef100_register_netdev(efx);
	if (rc)
		goto fail;

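	/* PF-only setup; the VF skips this.  See ef100_probe_netdev_pf(). */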
	if (!efx->type->is_vf) {
		rc = ef100_probe_netdev_pf(efx);
		if (rc)
			goto fail;
	}

	efx->netdev_notifier.notifier_call = ef100_netdev_event;
	rc = register_netdevice_notifier(&efx->netdev_notifier);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to register netdevice notifier, rc=%d\n", rc);
		goto fail;
	}

fail:
	return rc;
}