// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include "net_driver.h"
#include "mcdi_port_common.h"
#include "mcdi_functions.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "tx_common.h"
#include "ef100_netdev.h"
#include "ef100_ethtool.h"
#include "nic_common.h"
#include "ef100_nic.h"
#include "ef100_tx.h"
#include "ef100_regs.h"
#include "mcdi_filters.h"
#include "rx_common.h"

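/* Keep efx->name in sync with the current netdev name */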
static void ef100_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
}

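/* Size the VI (virtual interface) allocation to cover every TX and RX
 * queue we intend to use, and request exactly that many from the MC.
 */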
static int ef100_alloc_vis(struct efx_nic *efx, unsigned int *allocated_vis)
{
	/* EF100 uses a single TXQ per channel, as all checksum offloading
	 * is configured in the TX descriptor, and there is no TX Pacer for
	 * HIGHPRI queues.
	 */
	unsigned int tx_vis = efx->n_tx_channels + efx->n_extra_tx_channels;
	unsigned int rx_vis = efx->n_rx_channels;
	unsigned int min_vis, max_vis;

	EFX_WARN_ON_PARANOID(efx->tx_queues_per_channel != 1);

	tx_vis += efx->n_xdp_channels * efx->xdp_tx_per_channel;

	max_vis = max(rx_vis, tx_vis);
	/* Currently don't handle resource starvation and only accept
	 * our maximum needs and no less.
	 */
	min_vis = max_vis;

	return efx_mcdi_alloc_vis(efx, min_vis, max_vis,
				  NULL, allocated_vis);
}

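/* Re-map the memory BAR so that it is large enough for max_vis VIs,
 * each of vi_stride bytes.
 */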
static int ef100_remap_bar(struct efx_nic *efx, int max_vis)
{
	unsigned int uc_mem_map_size;
	void __iomem *membase;

	efx->max_vis = max_vis;
	uc_mem_map_size = PAGE_ALIGN(max_vis * efx->vi_stride);

	/* Extend the original UC mapping of the memory BAR */
	membase = ioremap(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not extend memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int ef100_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

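	/* Tear down the datapath in roughly the reverse order to
	 * ef100_net_open()
	 */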
	netif_stop_queue(net_dev);
	efx_stop_all(efx);
	efx_mcdi_mac_fini_stats(efx);
	efx_disable_interrupts(efx);
	efx_clear_interrupt_affinity(efx);
	efx_nic_fini_interrupt(efx);
	efx_remove_filters(efx);
	efx_fini_napi(efx);
	efx_remove_channels(efx);
	efx_mcdi_free_vis(efx);
	efx_remove_interrupts(efx);

	return 0;
}

/* Context: process, rtnl_lock() held. */
static int ef100_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	unsigned int allocated_vis;
	int rc;

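	/* Update efx->name from the netdev in case it was renamed while closed */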
	ef100_update_name(efx);
	netif_dbg(efx, ifup, net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = efx_check_disabled(efx);
	if (rc)
		goto fail;

	rc = efx_probe_interrupts(efx);
	if (rc)
		goto fail;

	rc = efx_set_channels(efx);
	if (rc)
		goto fail;

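	/* Release any VIs left over from a previous allocation before
	 * requesting a fresh set
	 */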
	rc = efx_mcdi_free_vis(efx);
	if (rc)
		goto fail;

	rc = ef100_alloc_vis(efx, &allocated_vis);
	if (rc)
		goto fail;

	rc = efx_probe_channels(efx);
	if (rc)
		return rc;

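	/* The BAR mapping must cover all the VIs we were actually given */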
	rc = ef100_remap_bar(efx, allocated_vis);
	if (rc)
		goto fail;

	efx_init_napi(efx);

	rc = efx_probe_filters(efx);
	if (rc)
		goto fail;

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail;
	efx_set_interrupt_affinity(efx);

	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail;

	/* in case the MC rebooted while we were stopped, consume the change
	 * to the warm reboot count
	 */
	(void) efx_mcdi_poll_reboot(efx);

	rc = efx_mcdi_mac_init_stats(efx);
	if (rc)
		goto fail;

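	/* All resources are in place; start the datapath */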
	efx_start_all(efx);

	/* Link state detection is normally event-driven; we have
	 * to poll now because we could have missed a change
	 */
	mutex_lock(&efx->mac_lock);
	if (efx_mcdi_phy_poll(efx))
		efx_link_status_changed(efx);
	mutex_unlock(&efx->mac_lock);

	return 0;

fail:
	ef100_net_stop(net_dev);
	return rc;
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
					 struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int rc;

	channel = efx_get_tx_channel(efx, skb_get_queue_mapping(skb));
	netif_vdbg(efx, tx_queued, efx->net_dev,
		   "%s len %d data %d channel %d\n", __func__,
		   skb->len, skb->data_len, channel->channel);
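	/* The datapath is not fully set up, so drop the packet.  As we
	 * return NETDEV_TX_OK the stack will not free the skb, so we must.
	 */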
	if (!efx->n_channels || !efx->n_tx_channels || !channel) {
		netif_stop_queue(net_dev);
		dev_kfree_skb_any(skb);
		goto err;
	}

	tx_queue = &channel->tx_queue[0];
	rc = ef100_enqueue_skb(tx_queue, skb);
	if (rc == 0)
		return NETDEV_TX_OK;

err:
	net_dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static const struct net_device_ops ef100_netdev_ops = {
	.ndo_open               = ef100_net_open,
	.ndo_stop               = ef100_net_stop,
	.ndo_start_xmit         = ef100_hard_start_xmit,
	.ndo_get_stats64        = efx_net_stats,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_rx_mode        = efx_set_rx_mode, /* Lookout */
	.ndo_get_phys_port_id   = efx_get_phys_port_id,
	.ndo_get_phys_port_name = efx_get_phys_port_name,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer      = efx_filter_rfs,
#endif
};

/*	Netdev registration
 */
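/* Keep efx->name up to date when our own net device is renamed */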
int ef100_netdev_event(struct notifier_block *this,
		       unsigned long event, void *ptr)
{
	struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier);
	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

	if (netdev_priv(net_dev) == efx && event == NETDEV_CHANGENAME)
		ef100_update_name(efx);

	return NOTIFY_DONE;
}

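/* Fill in the netdev's ops, MTU limits and ethtool ops, pick a name, and
 * register the device with the networking core.
 */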
int ef100_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &ef100_netdev_ops;
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;
	net_dev->ethtool_ops = &ef100_ethtool_ops;

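	/* register_netdevice() requires the RTNL lock, so hold it across
	 * name allocation and registration
	 */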
	rtnl_lock();

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	ef100_update_name(efx);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	efx->state = STATE_READY;
	rtnl_unlock();
	efx_init_mcdi_logging(efx);

	return 0;

fail_locked:
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}

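/* Undo ef100_register_netdev(), if the net device was registered */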
void ef100_unregister_netdev(struct efx_nic *efx)
{
	if (efx_dev_registered(efx)) {
		efx_fini_mcdi_logging(efx);
		efx->state = STATE_UNINIT;
		unregister_netdev(efx->net_dev);
	}
}