// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include "net_driver.h"
#include "mcdi_port_common.h"
#include "mcdi_functions.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "tx_common.h"
#include "ef100_netdev.h"
#include "ef100_ethtool.h"
#include "nic_common.h"
#include "ef100_nic.h"
#include "ef100_tx.h"
#include "ef100_regs.h"
#include "mcdi_filters.h"
#include "rx_common.h"
#include "ef100_sriov.h"
#include "tc_bindings.h"

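/* Keep the driver's cached copy of the interface name (efx->name) in sync
 * with the netdev; called at registration time and again from the netdev
 * notifier whenever the device is renamed.
 */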
static void ef100_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
}

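/* Ask the MC for exactly the number of VIs (virtual interfaces) needed to
 * back every TX and RX queue we plan to use; see the comment below on why
 * a single TXQ per channel is enough on EF100.
 */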
static int ef100_alloc_vis(struct efx_nic *efx, unsigned int *allocated_vis)
{
	/* EF100 uses a single TXQ per channel, as all checksum offloading
	 * is configured in the TX descriptor, and there is no TX Pacer for
	 * HIGHPRI queues.
	 */
	unsigned int tx_vis = efx->n_tx_channels + efx->n_extra_tx_channels;
	unsigned int rx_vis = efx->n_rx_channels;
	unsigned int min_vis, max_vis;

	EFX_WARN_ON_PARANOID(efx->tx_queues_per_channel != 1);

	tx_vis += efx->n_xdp_channels * efx->xdp_tx_per_channel;

	max_vis = max(rx_vis, tx_vis);
	/* Currently don't handle resource starvation and only accept
	 * our maximum needs and no less.
	 */
	min_vis = max_vis;

	return efx_mcdi_alloc_vis(efx, min_vis, max_vis,
				  NULL, allocated_vis);
}

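/* The UC (uncached) mapping of the memory BAR only needs to cover the VIs
 * actually in use, so it is re-established here once we know how many VIs
 * the MC granted us.
 */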
static int ef100_remap_bar(struct efx_nic *efx, int max_vis)
{
	unsigned int uc_mem_map_size;
	void __iomem *membase;

	efx->max_vis = max_vis;
	uc_mem_map_size = PAGE_ALIGN(max_vis * efx->vi_stride);

	/* Extend the original UC mapping of the memory BAR */
	membase = ioremap(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not extend memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int ef100_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

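	/* Tear down the datapath in roughly the reverse order to
	 * ef100_net_open(): stop TX/RX first, then release filters, NAPI,
	 * channels, VIs and finally the interrupts.
	 */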
	efx_detach_reps(efx);
	netif_stop_queue(net_dev);
	efx_stop_all(efx);
	efx_mcdi_mac_fini_stats(efx);
	efx_disable_interrupts(efx);
	efx_clear_interrupt_affinity(efx);
	efx_nic_fini_interrupt(efx);
	efx_remove_filters(efx);
	efx_fini_napi(efx);
	efx_remove_channels(efx);
	efx_mcdi_free_vis(efx);
	efx_remove_interrupts(efx);

	efx->state = STATE_NET_DOWN;

	return 0;
}

/* Context: process, rtnl_lock() held. */
static int ef100_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	unsigned int allocated_vis;
	int rc;

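	/* Interrupts, channels and VIs are all allocated here at open time
	 * and released again in ef100_net_stop(); the BAR mapping is then
	 * extended to cover however many VIs we were granted.
	 */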
	ef100_update_name(efx);
	netif_dbg(efx, ifup, net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = efx_check_disabled(efx);
	if (rc)
		goto fail;

	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail;

	rc = efx_set_channels(efx);
	if (rc)
		goto fail;

	rc = efx_mcdi_free_vis(efx);
	if (rc)
		goto fail;

	rc = ef100_alloc_vis(efx, &allocated_vis);
	if (rc)
		goto fail;

	rc = efx_probe_channels(efx);
	if (rc)
		return rc;

	rc = ef100_remap_bar(efx, allocated_vis);
	if (rc)
		goto fail;

	efx_init_napi(efx);

	rc = efx_probe_filters(efx);
	if (rc)
		goto fail;

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail;
	efx_set_interrupt_affinity(efx);

	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail;

	/* in case the MC rebooted while we were stopped, consume the change
	 * to the warm reboot count
	 */
	(void) efx_mcdi_poll_reboot(efx);

	rc = efx_mcdi_mac_init_stats(efx);
	if (rc)
		goto fail;

	efx_start_all(efx);

	/* Link state detection is normally event-driven; we have
	 * to poll now because we could have missed a change
	 */
	mutex_lock(&efx->mac_lock);
	if (efx_mcdi_phy_poll(efx))
		efx_link_status_changed(efx);
	mutex_unlock(&efx->mac_lock);

	efx->state = STATE_NET_UP;
	if (netif_running(efx->net_dev))
		efx_attach_reps(efx);

	return 0;

fail:
	ef100_net_stop(net_dev);
	return rc;
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
					 struct net_device *net_dev)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);

	return __ef100_hard_start_xmit(skb, efx, net_dev, NULL);
}

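/* Shared TX entry point: called with efv == NULL for the netdev's own
 * traffic (via ef100_hard_start_xmit() above) and with a non-NULL efv for
 * packets transmitted on behalf of a port representor.
 */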
netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
				    struct efx_nic *efx,
				    struct net_device *net_dev,
				    struct efx_rep *efv)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int rc;

	channel = efx_get_tx_channel(efx, skb_get_queue_mapping(skb));
	netif_vdbg(efx, tx_queued, efx->net_dev,
		   "%s len %d data %d channel %d\n", __func__,
		   skb->len, skb->data_len, channel->channel);
	if (!efx->n_channels || !efx->n_tx_channels || !channel) {
		netif_stop_queue(net_dev);
		/* We return NETDEV_TX_OK from the error path, so the skb
		 * must be freed here rather than by the stack.
		 */
		dev_kfree_skb_any(skb);
		goto err;
	}

	tx_queue = &channel->tx_queue[0];
	rc = __ef100_enqueue_skb(tx_queue, skb, efv);
	if (rc == 0)
		return NETDEV_TX_OK;

err:
	net_dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

233 
234 static const struct net_device_ops ef100_netdev_ops = {
235 	.ndo_open               = ef100_net_open,
236 	.ndo_stop               = ef100_net_stop,
237 	.ndo_start_xmit         = ef100_hard_start_xmit,
238 	.ndo_tx_timeout         = efx_watchdog,
239 	.ndo_get_stats64        = efx_net_stats,
240 	.ndo_change_mtu         = efx_change_mtu,
241 	.ndo_validate_addr      = eth_validate_addr,
242 	.ndo_set_mac_address    = efx_set_mac_address,
243 	.ndo_set_rx_mode        = efx_set_rx_mode, /* Lookout */
244 	.ndo_set_features       = efx_set_features,
245 	.ndo_get_phys_port_id   = efx_get_phys_port_id,
246 	.ndo_get_phys_port_name = efx_get_phys_port_name,
247 #ifdef CONFIG_RFS_ACCEL
248 	.ndo_rx_flow_steer      = efx_filter_rfs,
249 #endif
250 #ifdef CONFIG_SFC_SRIOV
251 	.ndo_setup_tc		= efx_tc_setup,
252 #endif
253 };
254 
255 /*	Netdev registration
256  */
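/* The notifier exists to catch NETDEV_REGISTER and NETDEV_CHANGENAME
 * events for our own net_device so that efx->name tracks the netdev name.
 */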
int ef100_netdev_event(struct notifier_block *this,
		       unsigned long event, void *ptr)
{
	struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier);
	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

	if (efx->net_dev == net_dev &&
	    (event == NETDEV_CHANGENAME || event == NETDEV_REGISTER))
		ef100_update_name(efx);

	return NOTIFY_DONE;
}

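/* Fill in the net_device ops and MTU limits, pick a name and register the
 * device with the networking core.  The carrier is left off until the
 * first PHY event reports the link state.
 */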
static int ef100_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &ef100_netdev_ops;
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;
	net_dev->ethtool_ops = &ef100_ethtool_ops;

	rtnl_lock();

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	ef100_update_name(efx);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	efx->state = STATE_NET_DOWN;
	rtnl_unlock();
	efx_init_mcdi_logging(efx);

	return 0;

fail_locked:
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}

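/* Inverse of ef100_register_netdev(); harmless to call if the netdev was
 * never actually registered.
 */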
static void ef100_unregister_netdev(struct efx_nic *efx)
{
	if (efx_dev_registered(efx)) {
		efx_fini_mcdi_logging(efx);
		efx->state = STATE_PROBED;
		unregister_netdev(efx->net_dev);
	}
}

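/* Full netdev teardown on device removal: close the interface, drop the
 * notifier and any SR-IOV/TC state, then release the filter table,
 * channels and finally the net_device itself.
 */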
void ef100_remove_netdev(struct efx_probe_data *probe_data)
{
	struct efx_nic *efx = &probe_data->efx;

	if (!efx->net_dev)
		return;

	rtnl_lock();
	dev_close(efx->net_dev);
	rtnl_unlock();

	unregister_netdevice_notifier(&efx->netdev_notifier);
#if defined(CONFIG_SFC_SRIOV)
	if (!efx->type->is_vf)
		efx_ef100_pci_sriov_disable(efx, true);
#endif

	ef100_unregister_netdev(efx);

#ifdef CONFIG_SFC_SRIOV
	efx_fini_tc(efx);
#endif

	down_write(&efx->filter_sem);
	efx_mcdi_filter_table_remove(efx);
	up_write(&efx->filter_sem);
	efx_fini_channels(efx);
	kfree(efx->phy_data);
	efx->phy_data = NULL;

	free_netdev(efx->net_dev);
	efx->net_dev = NULL;
	efx->state = STATE_PROBED;
}

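/* Allocate the net_device, wire up its features and TSO limits, probe the
 * datapath capabilities, PHY, channels and filter table, and finally
 * register the netdev and its notifier.
 */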
int ef100_probe_netdev(struct efx_probe_data *probe_data)
{
	struct efx_nic *efx = &probe_data->efx;
	struct efx_probe_data **probe_ptr;
	struct net_device *net_dev;
	int rc;

	if (efx->mcdi->fn_flags &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
		pci_info(efx->pci_dev, "No network port on this PCI function");
		return 0;
	}

	/* Allocate and initialise a struct net_device */
	net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	probe_ptr = netdev_priv(net_dev);
	*probe_ptr = probe_data;
	efx->net_dev = net_dev;
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);

	net_dev->features |= efx->type->offload_features;
	net_dev->hw_features |= efx->type->offload_features;
	net_dev->hw_enc_features |= efx->type->offload_features;
	net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
				  NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
	netif_set_tso_max_segs(net_dev,
			       ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
	efx->mdio.dev = net_dev;

	rc = efx_ef100_init_datapath_caps(efx);
	if (rc < 0)
		goto fail;

	rc = ef100_phy_probe(efx);
	if (rc)
		goto fail;

	rc = efx_init_channels(efx);
	if (rc)
		goto fail;

	down_write(&efx->filter_sem);
	rc = ef100_filter_table_probe(efx);
	up_write(&efx->filter_sem);
	if (rc)
		goto fail;

	netdev_rss_key_fill(efx->rss_context.rx_hash_key,
			    sizeof(efx->rss_context.rx_hash_key));

	/* Don't fail init if RSS setup doesn't work. */
	efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);

	rc = ef100_register_netdev(efx);
	if (rc)
		goto fail;

	if (!efx->type->is_vf) {
		rc = ef100_probe_netdev_pf(efx);
		if (rc)
			goto fail;
	}

	efx->netdev_notifier.notifier_call = ef100_netdev_event;
	rc = register_netdevice_notifier(&efx->netdev_notifier);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to register netdevice notifier, rc=%d\n", rc);
		goto fail;
	}

fail:
	return rc;
}